From e925a239ffdb3594edba59978c67e622eecbb4a7 Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Fri, 11 Jan 2013 13:20:19 -0300 Subject: [PATCH 01/34] pqd: Removed --execute, --execute-throttle & --mirror --- bin/pt-query-digest | 435 +----------------- t/pt-query-digest/execute.t | 92 ---- t/pt-query-digest/mirror.t | 105 ----- .../samples/slow018_execute_report_1.txt | 41 -- .../samples/slow018_execute_report_2.txt | 30 -- 5 files changed, 12 insertions(+), 691 deletions(-) delete mode 100644 t/pt-query-digest/execute.t delete mode 100644 t/pt-query-digest/mirror.t delete mode 100644 t/pt-query-digest/samples/slow018_execute_report_1.txt delete mode 100644 t/pt-query-digest/samples/slow018_execute_report_2.txt diff --git a/bin/pt-query-digest b/bin/pt-query-digest index c31c4fbf..386db7ba 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -41,7 +41,6 @@ BEGIN { RawLogParser ProtocolParser HTTPProtocolParser - ExecutionThrottler MasterSlave Progress FileIterator @@ -10346,143 +10345,6 @@ sub _d { # End HTTPProtocolParser package # ########################################################################### -# ########################################################################### -# ExecutionThrottler package -# This package is a copy without comments from the original. The original -# with comments and its test file can be found in the Bazaar repository at, -# lib/ExecutionThrottler.pm -# t/lib/ExecutionThrottler.t -# See https://launchpad.net/percona-toolkit for more information. -# ########################################################################### -{ -package ExecutionThrottler; - -use strict; -use warnings FATAL => 'all'; -use English qw(-no_match_vars); -use constant PTDEBUG => $ENV{PTDEBUG} || 0; - -use List::Util qw(sum min max); -use Time::HiRes qw(time); -use Data::Dumper; -$Data::Dumper::Indent = 1; -$Data::Dumper::Sortkeys = 1; -$Data::Dumper::Quotekeys = 0; - -sub new { - my ( $class, %args ) = @_; - my @required_args = qw(rate_max get_rate check_int step); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $self = { - step => 0.05, # default - %args, - rate_ok => undef, - last_check => undef, - stats => { - rate_avg => 0, - rate_samples => [], - }, - int_rates => [], - skip_prob => 0.0, - }; - - return bless $self, $class; -} - -sub throttle { - my ( $self, %args ) = @_; - my $time = $args{misc}->{time} || time; - if ( $self->_time_to_check($time) ) { - my $rate_avg = (sum(@{$self->{int_rates}}) || 0) - / (scalar @{$self->{int_rates}} || 1); - my $running_avg = $self->_save_rate_avg($rate_avg); - PTDEBUG && _d('Average rate for last interval:', $rate_avg); - - if ( $args{stats} ) { - $args{stats}->{throttle_checked_rate}++; - $args{stats}->{throttle_rate_avg} = sprintf '%.2f', $running_avg; - } - - @{$self->{int_rates}} = (); - - if ( $rate_avg > $self->{rate_max} ) { - $self->{skip_prob} += $self->{step}; - $self->{skip_prob} = 1.0 if $self->{skip_prob} > 1.0; - PTDEBUG && _d('Rate max exceeded'); - $args{stats}->{throttle_rate_max_exceeded}++ if $args{stats}; - } - else { - $self->{skip_prob} -= $self->{step}; - $self->{skip_prob} = 0.0 if $self->{skip_prob} < 0.0; - $args{stats}->{throttle_rate_ok}++ if $args{stats}; - } - - PTDEBUG && _d('Skip probability:', $self->{skip_prob}); - $self->{last_check} = $time; - } - else { - my $current_rate = $self->{get_rate}->(); - push @{$self->{int_rates}}, $current_rate; - if ( $args{stats} ) { - $args{stats}->{throttle_rate_min} = min( - 
($args{stats}->{throttle_rate_min} || ()), $current_rate); - $args{stats}->{throttle_rate_max} = max( - ($args{stats}->{throttle_rate_max} || ()), $current_rate); - } - PTDEBUG && _d('Current rate:', $current_rate); - } - - if ( $args{event} ) { - $args{event}->{Skip_exec} = $self->{skip_prob} <= rand() ? 'No' : 'Yes'; - } - - return $args{event}; -} - -sub _time_to_check { - my ( $self, $time ) = @_; - if ( !$self->{last_check} ) { - $self->{last_check} = $time; - return 0; - } - return $time - $self->{last_check} >= $self->{check_int} ? 1 : 0; -} - -sub rate_avg { - my ( $self ) = @_; - return $self->{stats}->{rate_avg} || 0; -} - -sub skip_probability { - my ( $self ) = @_; - return $self->{skip_prob}; -} - -sub _save_rate_avg { - my ( $self, $rate ) = @_; - my $samples = $self->{stats}->{rate_samples}; - push @$samples, $rate; - shift @$samples if @$samples > 1_000; - $self->{stats}->{rate_avg} = sum(@$samples) / (scalar @$samples); - return $self->{stats}->{rate_avg} || 0; -} - -sub _d { - my ($package, undef, $line) = caller 0; - @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } - map { defined $_ ? $_ : 'undef' } - @_; - print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; -} - -1; -} -# ########################################################################### -# End ExecutionThrottler package -# ########################################################################### - # ########################################################################### # MasterSlave package # This package is a copy without comments from the original. The original @@ -13312,7 +13174,6 @@ use sigtrap 'handler', \&sig_int, 'normal-signals'; # Global variables. Only really essential variables should be here. my $oktorun = 1; -my $ex_dbh; # For --execute my $ep_dbh; # For --explain my $ps_dbh; # For Processlist my $aux_dbh; # For --aux-dsn (--since/--until "MySQL expression") @@ -13349,26 +13210,11 @@ sub main { $o->save_error('The --review DSN requires a D (database) and t' . ' (table) part specifying the query review table'); } - if ( $o->get('mirror') - && (!$o->get('execute') || !$o->get('processlist')) ) { - $o->save_error('--mirror requires --execute and --processlist'); - } if ( $o->get('outliers') && grep { $_ !~ m/^\w+:[0-9.]+(?::[0-9.]+)?$/ } @{$o->get('outliers')} ) { $o->save_error('--outliers requires two or three colon-separated fields'); } - if ( $o->get('execute-throttle') ) { - my ($rate_max, $int, $step) = @{$o->get('execute-throttle')}; - $o->save_error("--execute-throttle max time must be between 1 and 100") - unless $rate_max && $rate_max > 0 && $rate_max <= 100; - $o->save_error("No check interval value for --execute-throttle") - unless $int; - $o->save_error("--execute-throttle check interval must be an integer") - if $int =~ m/[^\d]/; - $o->save_error("--execute-throttle step must be between 1 and 100") - if $step && ($step < 1 || $step > 100); - } if ( $o->get('progress') ) { eval { Progress->validate_spec($o->get('progress')) }; if ( $EVAL_ERROR ) { @@ -13598,7 +13444,7 @@ sub main { # Enable timings to instrument code for either of these two opts. # Else, don't instrument to avoid cost of measurement. - my $instrument = $o->get('pipeline-profile') || $o->get('execute-throttle'); + my $instrument = $o->get('pipeline-profile'); PTDEBUG && _d('Instrument:', $instrument); my $pipeline = new Pipeline( @@ -13722,14 +13568,12 @@ sub main { $err = $EVAL_ERROR; if ( $err ) { # Try to reconnect when there's an error. 
eval { - ($cur_server, $ps_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ps_dbh, - current => $cur_server, - read_only => 0, - comment => 'for --processlist' - ); + if ( !$ps_dbh || !$ps_dbh->ping ) { + PTDEBUG && _d('Getting a dbh from', $cur_server); + $ps_dbh = $dp->get_dbh( + $dp->get_cxn_params($o->get($cur_server)), {AutoCommit => 1}); + $ps_dbh->{InactiveDestroy} = 1; # Don't die on fork(). + } $cur_time = time(); $sth = $ps_dbh->prepare('SHOW FULL PROCESSLIST'); $cxn = $ps_dbh->{mysql_thread_id}; @@ -13742,18 +13586,6 @@ sub main { } } } until ( $sth && !$err ); - if ( $o->get('mirror') - && time() - $cur_time > $o->get('mirror')) { - ($cur_server, $ps_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ps_dbh, - current => $cur_server, - read_only => 0, - comment => 'for --processlist' - ); - $cur_time = time(); - } return [ grep { $_->[0] != $cxn } @{ $sth->fetchall_arrayref(); } ]; }; @@ -13946,7 +13778,7 @@ sub main { ); $aux_dbh->{InactiveDestroy} = 1; # Don't die on fork(). } - $aux_dbh ||= $qv_dbh || $qv_dbh2 || $ex_dbh || $ps_dbh || $ep_dbh; + $aux_dbh ||= $qv_dbh || $qv_dbh2 || $ps_dbh || $ep_dbh; PTDEBUG && _d('aux dbh:', $aux_dbh); my $time_callback = sub { @@ -14392,139 +14224,6 @@ sub main { } } # sample - my $ex_dsn; - { # execute throttle and execute - my $et; - if ( my $et_args = $o->get('execute-throttle') ) { - # These were check earlier; no need to check them again. - my ($rate_max, $int, $step) = @{$o->get('execute-throttle')}; - $step ||= 5; - $step /= 100; # step specified as percent but $et expect 0.1=10%, etc. - PTDEBUG && _d('Execute throttle:', $rate_max, $int, $step); - - my $get_rate = sub { - my $instrument = $pipeline->instrumentation; - return percentage_of( - $instrument->{execute}->{time} || 0, - $instrument->{Pipeline}->{time} || 0, - ); - }; - - $et = new ExecutionThrottler( - rate_max => $rate_max, - get_rate => $get_rate, - check_int => $int, - step => $step, - ); - - $pipeline->add( - name => 'execute throttle', - process => sub { - my ( $args ) = @_; - $args->{event} = $et->throttle( - event => $args->{event}, - stats => \%stats, - misc => $args->{misc}, - ); - return $args; - }, - ); - } # execute throttle - - if ( $ex_dsn = $o->get('execute') ) { - if ( $o->get('ask-pass') ) { - $ex_dsn->{p} = OptionParser::prompt_noecho("Enter password for " - . "--execute: "); - $o->set('execute', $ex_dsn); - } - - my $cur_server = 'execute'; - ($cur_server, $ex_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ex_dbh, - current => $cur_server, - read_only => 1, - comment => 'for --execute' - ); - my $cur_time = time(); - my $curdb; - my $default_db = $o->get('execute')->{D}; - PTDEBUG && _d('Default db:', $default_db); - - $pipeline->add( - name => 'execute', - process => sub { - my ( $args ) = @_; - my $event = $args->{event}; - $event->{Exec_orig_time} = $event->{Query_time}; - if ( ($event->{Skip_exec} || '') eq 'Yes' ) { - PTDEBUG && _d('Not executing event because of ', - '--execute-throttle'); - # Zero Query_time to 'Exec time' will show the real time - # spent executing queries. 
- $event->{Query_time} = 0; - $stats{execute_skipped}++; - return $args; - } - $stats{execute_executed}++; - my $db = $event->{db} || $default_db; - eval { - if ( $db && (!$curdb || $db ne $curdb) ) { - $ex_dbh->do("USE $db"); - $curdb = $db; - } - my $start = time(); - $ex_dbh->do($event->{arg}); - my $end = time(); - $event->{Query_time} = $end - $start; - $event->{Exec_diff_time} - = $event->{Query_time} - $event->{Exec_orig_time}; - if ($o->get('mirror') && $end-$cur_time > $o->get('mirror')) { - ($cur_server, $ex_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ex_dbh, - current => $cur_server, - read_only => 1, - comment => 'for --execute' - ); - $cur_time = $end; - } - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d($EVAL_ERROR); - $stats{execute_error}++; - # Don't try to re-execute the statement. Just skip it. - if ( $EVAL_ERROR =~ m/server has gone away/ ) { - print STDERR $EVAL_ERROR; - eval { - ($cur_server, $ex_dbh) = find_role( - OptionParser => $o, - DSNParser => $dp, - dbh => $ex_dbh, - current => $cur_server, - read_only => 1, - comment => 'for --execute' - ); - $cur_time = time(); - }; - if ( $EVAL_ERROR ) { - print STDERR $EVAL_ERROR; - sleep 1; - } - return; - } - if ( $EVAL_ERROR =~ m/No database/ ) { - $stats{execute_no_database}++; - } - } - return $args; - }, - ); - } # execute - } # execute throttle and execute - if ( $o->get('print') ) { my $w = new SlowLogWriter(); $pipeline->add( @@ -14633,7 +14332,6 @@ sub main { instances => [ ($qv_dbh ? { dbh => $qv_dbh, dsn => $review_dsn } : ()), ($ps_dbh ? { dbh => $ps_dbh, dsn => $ps_dsn } : ()), - ($ex_dbh ? { dbh => $ex_dbh, dsn => $ex_dsn } : ()) ], protocol => $o->get('version-check'), ); @@ -14664,7 +14362,7 @@ sub main { PTDEBUG && _d('Disconnected dbh', $_); } grep { $_ } - ($qv_dbh, $qv_dbh2, $ex_dbh, $ps_dbh, $ep_dbh, $aux_dbh); + ($qv_dbh, $qv_dbh2, $ps_dbh, $ep_dbh, $aux_dbh); return 0; } # End main() @@ -14819,45 +14517,6 @@ sub print_reports { return; } -# Pass in the currently open $dbh (if any), where $current points to ('execute' -# or 'processlist') and whether you want to be connected to the read_only -# server. Get back which server you're looking at, and the $dbh. Assumes that -# one of the servers is ALWAYS read only and the other is ALWAYS not! If -# there's some transition period where this isn't true, maybe both will end up -# pointing to the same place, but that should resolve shortly. -# The magic switching functionality only works if --mirror is given! Otherwise -# it just returns the correct $dbh. $comment is some descriptive text for -# debuggin, like 'for --execute'. -sub find_role { - my ( %args ) = @_; - my $o = $args{OptionParser}; - my $dp = $args{DSNParser}; - my $dbh = $args{dbh}; - my $current = $args{current}; - my $read_only = $args{read_only}; - my $comment = $args{comment}; - - if ( !$dbh || !$dbh->ping ) { - PTDEBUG && _d('Getting a dbh from', $current, $comment); - $dbh = $dp->get_dbh( - $dp->get_cxn_params($o->get($current)), {AutoCommit => 1}); - $dbh->{InactiveDestroy} = 1; # Don't die on fork(). - } - if ( $o->get('mirror') ) { - my ( $is_read_only ) = $dbh->selectrow_array('SELECT @@global.read_only'); - PTDEBUG && _d("read_only on", $current, $comment, ':', - $is_read_only, '(want', $read_only, ')'); - if ( $is_read_only != $read_only ) { - $current = $current eq 'execute' ? 
'processlist' : 'execute'; - PTDEBUG && _d("read_only wrong", $comment, "getting a dbh from", $current); - $dbh = $dp->get_dbh( - $dp->get_cxn_params($o->get($current)), {AutoCommit => 1}); - $dbh->{InactiveDestroy} = 1; # Don't die on fork(). - } - } - return ($current, $dbh); -} - # Catches signals so we can exit gracefully. sub sig_int { my ( $signal ) = @_; @@ -15201,12 +14860,6 @@ server running on host1. See L<"--review"> for more on reviewing queries: pt-query-digest --review h=host1,D=test,t=query_review /path/to/slow.log -Filter out everything but SELECT queries, replay the queries against another -server, then use the timings from replaying them to analyze their performance: - - pt-query-digest /path/to/slow.log --execute h=another_server \ - --filter '$event->{fingerprint} =~ m/^select/' - Print the structure of events so you can construct a complex L<"--filter">: pt-query-digest /path/to/slow.log --no-report \ @@ -15235,8 +14888,7 @@ is safe to run even on production systems, but you might want to monitor it until you are satisfied that the input you give it does not cause undue load. Various options will cause pt-query-digest to insert data into tables, execute -SQL queries, and so on. These include the L<"--execute"> option and -L<"--review">. +SQL queries, and so on. These include the L<"--review"> options. At the time of this release, we know of no bugs that could cause serious harm to users. @@ -15797,50 +15449,6 @@ The second one splits it into attribute-value pairs and adds them to the event: B: All commas in the regex patterns must be escaped with \ otherwise the pattern will break. -=item --execute - -type: DSN - -Execute queries on this DSN. - -Adds a callback into the chain, after filters but before the reports. Events -are executed on this DSN. If they are successful, the time they take to execute -overwrites the event's Query_time attribute and the original Query_time value -(from the log) is saved as the Exec_orig_time attribute. If unsuccessful, -the callback returns false and terminates the chain. - -If the connection fails, pt-query-digest tries to reconnect once per second. - -See also L<"--mirror"> and L<"--execute-throttle">. - -=item --execute-throttle - -type: array - -Throttle values for L<"--execute">. - -By default L<"--execute"> runs without any limitations or concerns for the -amount of time that it takes to execute the events. The L<"--execute-throttle"> -allows you to limit the amount of time spent doing L<"--execute"> relative -to the other processes that handle events. This works by marking some events -with a C attribute when L<"--execute"> begins to take too much time. -L<"--execute"> will not execute an event if this attribute is true. This -indirectly decreases the time spent doing L<"--execute">. - -The L<"--execute-throttle"> option takes at least two comma-separated values: -max allowed L<"--execute"> time as a percentage and a check interval time. An -optional third value is a percentage step for increasing and decreasing the -probability that an event will be marked C true. 5 (percent) is -the default step. - -For example: L<"--execute-throttle"> C<70,60,10>. This will limit -L<"--execute"> to 70% of total event processing time, checked every minute -(60 seconds) and probability stepped up and down by 10%. When L<"--execute"> -exceeds 70%, the probability that events will be marked C true -increases by 10%. L<"--execute"> time is checked again after another minute. -If it's still above 70%, then the probability will increase another 10%. 
-Or, if it's dropped below 70%, then the probability will decrease by 10%. - =item --expected-range type: array; default: 5,10 @@ -16089,10 +15697,6 @@ which do not have them. For example, if one event has the db attribute equal to "foo", but the next event doesn't have the db attribute, then it inherits "foo" for its db attribute. -Inheritance is usually desirable, but in some cases it might confuse things. -If a query inherits a database that it doesn't actually use, then this could -confuse L<"--execute">. - =item --interval type: float; default: .1 @@ -16135,20 +15739,6 @@ type: string Print all output to this file when daemonized. -=item --mirror - -type: float - -How often to check whether connections should be moved, depending on -C. Requires L<"--processlist"> and L<"--execute">. - -This option causes pt-query-digest to check every N seconds whether it is reading -from a read-write server and executing against a read-only server, which is a -sensible way to set up two servers if you're doing something like master-master -replication. The L master-master -toolkit does this. The aim is to keep the passive server ready for failover, -which is impossible without putting it under a realistic workload. - =item --order-by type: Array; default: Query_time:sum @@ -16256,8 +15846,7 @@ type: DSN Poll this DSN's processlist for queries, with L<"--interval"> sleep between. -If the connection fails, pt-query-digest tries to reopen it once per second. See -also L<"--mirror">. +If the connection fails, pt-query-digest tries to reopen it once per second. =item --progress @@ -16715,7 +16304,7 @@ several types: If you give a MySQL time expression, then you must also specify a DSN so that pt-query-digest can connect to MySQL to evaluate the expression. If you -specify L<"--execute">, L<"--explain">, L<"--processlist">, L<"--review"> +specify L<"--explain">, L<"--processlist">, L<"--review"> or L<"--review-history">, then one of these DSNs will be used automatically. Otherwise, you must specify an L<"--aux-dsn"> or pt-query-digest will die saying that the value is invalid. 
diff --git a/t/pt-query-digest/execute.t b/t/pt-query-digest/execute.t deleted file mode 100644 index 75b51203..00000000 --- a/t/pt-query-digest/execute.t +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env perl - -BEGIN { - die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" - unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; - unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; -}; - -use strict; -use warnings FATAL => 'all'; -use English qw(-no_match_vars); -use Test::More; - -use Sandbox; -use PerconaTest; - -require "$trunk/bin/pt-query-digest"; - -my $dp = new DSNParser(opts=>$dsn_opts); -my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); -my $dbh = $sb->get_dbh_for('master'); - -if ( !$dbh ) { - plan skip_all => 'Cannot connect to sandbox master'; -} -else { - plan tests => 6; -} - -my $output = ''; -my $cnf = 'h=127.1,P=12345,u=msandbox,p=msandbox'; -my @args = qw(--report-format=query_report --limit 10 --stat); - -$sb->create_dbs($dbh, [qw(test)]); -$dbh->do('use test'); -$dbh->do('create table foo (a int, b int, c int)'); - -is_deeply( - $dbh->selectall_arrayref('select * from test.foo'), - [], - 'No rows in table yet' -); - -ok( - no_diff( - sub { pt_query_digest::main(@args, '--execute', $cnf, - "$trunk/t/lib/samples/slowlogs/slow018.txt") }, - 't/pt-query-digest/samples/slow018_execute_report_1.txt', - ), - '--execute without database' -); - -is_deeply( - $dbh->selectall_arrayref('select * from test.foo'), - [], - 'Still no rows in table' -); - -# Provide a default db to make --execute work. -$cnf .= ',D=test'; - -# TODO: This test is a PITA because every time the mqd output -# changes the -n of tail has to be adjusted. - -# - -# We tail to get everything from "Exec orig" onward. The lines -# above have the real execution time will will vary. The last 18 lines -# are sufficient to see that it actually executed without errors. -ok( - no_diff( - sub { pt_query_digest::main(@args, '--execute', $cnf, - "$trunk/t/lib/samples/slowlogs/slow018.txt") }, - 't/pt-query-digest/samples/slow018_execute_report_2.txt', - trf => 'tail -n 30', - sed => ["-e 's/s ##*/s/g'"], - ), - '--execute with default database' -); - -is_deeply( - $dbh->selectall_arrayref('select * from test.foo'), - [[qw(1 2 3)],[qw(4 5 6)]], - 'Rows in table' -); - -# ############################################################################# -# Done. -# ############################################################################# -$sb->wipe_clean($dbh); -ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . 
" broke the sandbox"); -exit; diff --git a/t/pt-query-digest/mirror.t b/t/pt-query-digest/mirror.t deleted file mode 100644 index 27e77aae..00000000 --- a/t/pt-query-digest/mirror.t +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env perl - -BEGIN { - die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" - unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; - unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; -}; - -use strict; -use warnings FATAL => 'all'; -use English qw(-no_match_vars); -use Test::More; -use Time::HiRes qw(sleep); - -use PerconaTest; -use DSNParser; -use Sandbox; - -my $dp = new DSNParser(opts=>$dsn_opts); -my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp); -my $dbh1 = $sb->get_dbh_for('master'); -my $dbh2 = $sb->get_dbh_for('slave1'); - -if ( !$dbh1 ) { - plan skip_all => 'Cannot connect to sandbox master'; -} -elsif ( !$dbh2 ) { - plan skip_all => 'Cannot connect to sandbox slave'; -} -else { - plan tests => 5; -} - -my $output; -my $cmd; -my $pid_file = "/tmp/pt-query-digest-mirror-test.pid"; -diag(`rm $pid_file 2>/dev/null`); - -# ########################################################################## -# Tests for swapping --processlist and --execute -# ########################################################################## -$dbh1->do('set global read_only=0'); -$dbh2->do('set global read_only=1'); -$cmd = "$trunk/bin/pt-query-digest " - . "--processlist h=127.1,P=12345,u=msandbox,p=msandbox " - . "--execute h=127.1,P=12346,u=msandbox,p=msandbox --mirror 1 " - . "--pid $pid_file"; - -{ - local $ENV{PTDEBUG}=1; - `$cmd > /tmp/read_only.txt 2>&1 &`; -} - -$dbh1->do('select sleep(1)'); -$dbh1->do('set global read_only=1'); -$dbh2->do('set global read_only=0'); -$dbh1->do('select sleep(1)'); - -PerconaTest::wait_for_files($pid_file); -chomp(my $pid = `cat $pid_file`); -kill 15, $pid; -sleep 0.25; - -# Verify that it's dead... -$output = `ps x | grep '^[ ]*$pid'`; -is( - $output, - '', - 'It is stopped now' -); - -$output = `ps -p $pid`; -unlike($output, qr/pt-query-digest/, 'It is stopped now'); - -$output = `grep read_only /tmp/read_only.txt`; -# Sample output: -# # main:3619 6897 read_only on execute for --execute: 1 (want 1) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on processlist for --processlist: 0 (want 0) -# # main:3619 6897 read_only on execute for --execute: 0 (want 1) -# # main:3622 6897 read_only wrong for --execute getting a dbh from processlist -# # main:3619 6897 read_only on processlist for --processlist: 1 (want 0) -# # main:3622 6897 read_only wrong for --processlist getting a dbh from execute -# # main:3619 6897 read_only on processlist for --execute: 1 (want 1) -# # main:3619 6897 read_only on execute for --processlist: 0 (want 0) -like($output, qr/wrong for --execute getting a dbh from processlist/, - 'switching --processlist works'); -like($output, qr/wrong for --processlist getting a dbh from execute/, - 'switching --execute works'); - -diag(`rm -rf /tmp/read_only.txt`); - -# ############################################################################# -# Done. 
-# ############################################################################# -diag(`rm $pid_file 2>/dev/null`); -$dbh1->do('set global read_only=0'); -$dbh2->do('set global read_only=1'); -$sb->wipe_clean($dbh1); -ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox"); -exit; diff --git a/t/pt-query-digest/samples/slow018_execute_report_1.txt b/t/pt-query-digest/samples/slow018_execute_report_1.txt deleted file mode 100644 index c03e29d7..00000000 --- a/t/pt-query-digest/samples/slow018_execute_report_1.txt +++ /dev/null @@ -1,41 +0,0 @@ - -# Query 1: 0 QPS, 0x concurrency, ID 0x6083030C4A5D8996 at byte 0 ________ -# This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 -# Query_time sparkline: | ^ | -# Time range: all events occurred at 2007-10-15 21:43:52 -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 100 1 -# Exec time 100 2s 2s 2s 2s 2s 0 2s -# Exec orig ti 100 2s 2s 2s 2s 2s 0 2s -# Lock time 0 0 0 0 0 0 0 0 -# Rows sent 100 1 1 1 1 1 0 1 -# Rows examine 0 0 0 0 0 0 0 0 -# Query size 100 44 44 44 44 44 0 44 -# String: -# Hosts localhost -# Users root -# Query_time distribution -# 1us -# 10us -# 100us -# 1ms -# 10ms -# 100ms -# 1s ################################################################ -# 10s+ -# Tables -# SHOW TABLE STATUS LIKE 'foo'\G -# SHOW CREATE TABLE `foo`\G -INSERT INTO `foo` VALUES (1, 2, 3) /*... omitted ...*/\G - -# Statistic Count %/Events -# ====================================== ===== ======== -# events_read 1 100.00 -# events_parsed 1 100.00 -# events_aggregated 1 100.00 -# execute_error 1 100.00 -# execute_executed 1 100.00 -# execute_no_database 1 100.00 -# pipeline_restarted_after_SlowLogParser 1 100.00 diff --git a/t/pt-query-digest/samples/slow018_execute_report_2.txt b/t/pt-query-digest/samples/slow018_execute_report_2.txt deleted file mode 100644 index dc7626a3..00000000 --- a/t/pt-query-digest/samples/slow018_execute_report_2.txt +++ /dev/null @@ -1,30 +0,0 @@ -# Exec orig ti 100 2s 2s 2s 2s 2s 0 2s -# Lock time 0 0 0 0 0 0 0 0 -# Rows sent 100 1 1 1 1 1 0 1 -# Rows examine 0 0 0 0 0 0 0 0 -# Query size 100 44 44 44 44 44 0 44 -# Exec diff ti 100 0 0 0 0 0 0 0 -# String: -# Hosts localhost -# Users root -# Query_time distribution -# 1us -# 10us -# 100us -# 1ms -# 10ms -# 100ms -# 1s -# 10s+ -# Tables -# SHOW TABLE STATUS LIKE 'foo'\G -# SHOW CREATE TABLE `foo`\G -INSERT INTO `foo` VALUES (1, 2, 3) /*... 
omitted ...*/\G - -# Statistic Count %/Events -# ====================================== ===== ======== -# events_read 1 100.00 -# events_parsed 1 100.00 -# events_aggregated 1 100.00 -# execute_executed 1 100.00 -# pipeline_restarted_after_SlowLogParser 1 100.00 From 944b2039d3512f9d0af32a1434d3ce6b55ffec61 Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Fri, 11 Jan 2013 13:20:39 -0300 Subject: [PATCH 02/34] pqd: Stop reporting apdex scores --- bin/pt-query-digest | 87 +------------------ lib/QueryReportFormatter.pm | 13 +-- .../QueryReportFormatter/report001.txt | 8 +- .../QueryReportFormatter/report002.txt | 4 +- .../QueryReportFormatter/report003.txt | 8 +- .../QueryReportFormatter/report004.txt | 8 +- .../QueryReportFormatter/report005.txt | 6 +- .../QueryReportFormatter/report007.txt | 2 +- .../QueryReportFormatter/report009.txt | 2 +- .../QueryReportFormatter/report010.txt | 2 +- .../QueryReportFormatter/report011.txt | 2 +- .../QueryReportFormatter/report012.txt | 2 +- .../QueryReportFormatter/report013.txt | 2 +- .../QueryReportFormatter/report014.txt | 2 +- .../QueryReportFormatter/report015.txt | 2 +- .../QueryReportFormatter/report016.txt | 2 +- .../QueryReportFormatter/report024.txt | 2 +- .../QueryReportFormatter/report028.txt | 2 +- .../QueryReportFormatter/report032.txt | 10 +-- t/pt-query-digest/samples/binlog001.txt | 28 +++--- t/pt-query-digest/samples/binlog002.txt | 16 ++-- .../samples/cannot-distill-profile.txt | 8 +- t/pt-query-digest/samples/genlog001.txt | 24 ++--- t/pt-query-digest/samples/genlog002.txt | 12 +-- t/pt-query-digest/samples/genlog003.txt | 24 ++--- t/pt-query-digest/samples/http_tcpdump002.txt | 20 ++--- .../samples/issue_1196-output-5.6.txt | 8 +- t/pt-query-digest/samples/memc_tcpdump001.txt | 2 +- t/pt-query-digest/samples/memc_tcpdump002.txt | 2 +- t/pt-query-digest/samples/memc_tcpdump003.txt | 4 +- .../memc_tcpdump003_report_key_print.txt | 2 +- t/pt-query-digest/samples/memc_tcpdump004.txt | 4 +- t/pt-query-digest/samples/memc_tcpdump005.txt | 2 +- t/pt-query-digest/samples/memc_tcpdump006.txt | 2 +- t/pt-query-digest/samples/memc_tcpdump007.txt | 2 +- t/pt-query-digest/samples/memc_tcpdump008.txt | 2 +- t/pt-query-digest/samples/memc_tcpdump009.txt | 2 +- t/pt-query-digest/samples/memc_tcpdump010.txt | 2 +- t/pt-query-digest/samples/pg-sample1 | 46 +++++----- t/pt-query-digest/samples/pg-syslog-sample1 | 20 ++--- t/pt-query-digest/samples/rawlog001.txt | 12 +-- .../samples/slow001_distillreport.txt | 4 +- t/pt-query-digest/samples/slow001_report.txt | 4 +- .../samples/slow001_select_report.txt | 4 +- .../samples/slow001_tablesreport.txt | 2 +- .../samples/slow002-orderbynonexistent.txt | 14 +-- t/pt-query-digest/samples/slow002_iters_2.txt | 10 +-- .../samples/slow002_orderbyreport.txt | 4 +- t/pt-query-digest/samples/slow002_report.txt | 14 +-- .../samples/slow002_report_filtered.txt | 2 +- t/pt-query-digest/samples/slow003_report.txt | 2 +- t/pt-query-digest/samples/slow004_report.txt | 2 +- .../samples/slow006-order-by-re.txt | 4 +- t/pt-query-digest/samples/slow006_AR_1.txt | 4 +- t/pt-query-digest/samples/slow006_AR_2.txt | 2 +- t/pt-query-digest/samples/slow006_AR_4.txt | 4 +- t/pt-query-digest/samples/slow006_AR_5.txt | 2 +- t/pt-query-digest/samples/slow006_report.txt | 4 +- .../samples/slow007_explain_1-55.txt | 2 +- .../samples/slow007_explain_2-51.txt | 2 +- .../samples/slow007_explain_3.txt | 8 +- .../samples/slow007_explain_4.txt | 6 +- t/pt-query-digest/samples/slow008_report.txt | 6 +- .../samples/slow010_reportbyfile.txt | 2 
+- t/pt-query-digest/samples/slow011_report.txt | 4 +- t/pt-query-digest/samples/slow013_report.txt | 8 +- .../slow013_report_fingerprint_user.txt | 4 +- .../samples/slow013_report_limit.txt | 2 +- .../samples/slow013_report_outliers.txt | 4 +- .../samples/slow013_report_profile.txt | 12 +-- .../samples/slow013_report_user.txt | 4 +- t/pt-query-digest/samples/slow014_report.txt | 2 +- t/pt-query-digest/samples/slow018_report.txt | 2 +- t/pt-query-digest/samples/slow019_report.txt | 4 +- .../samples/slow019_report_noza.txt | 4 +- t/pt-query-digest/samples/slow023.txt | 2 +- t/pt-query-digest/samples/slow024.txt | 6 +- t/pt-query-digest/samples/slow028.txt | 2 +- t/pt-query-digest/samples/slow032.txt | 2 +- .../samples/slow033-precise-since-until.txt | 4 +- .../samples/slow033-rtm-event-1h.txt | 8 +- .../samples/slow033-rtm-event-25h.txt | 8 +- .../samples/slow033-rtm-interval-1d.txt | 36 ++++---- .../samples/slow033-rtm-interval-30m.txt | 40 ++++----- .../slow033-rtm-interval-30s-3iter.txt | 24 ++--- .../samples/slow033-rtm-interval-30s.txt | 48 +++++----- .../samples/slow033-since-Nd.txt | 4 +- .../samples/slow033-since-yymmdd.txt | 4 +- .../samples/slow033-since-yyyy-mm-dd.txt | 2 +- .../samples/slow033-until-date.txt | 2 +- ...r-by-Locktime-sum-with-Locktime-distro.txt | 24 ++--- .../samples/slow034-order-by-Locktime-sum.txt | 24 ++--- t/pt-query-digest/samples/slow035.txt | 12 +-- t/pt-query-digest/samples/slow037_report.txt | 8 +- .../samples/slow042-show-all-host.txt | 2 +- t/pt-query-digest/samples/slow042.txt | 2 +- t/pt-query-digest/samples/slow048.txt | 8 +- t/pt-query-digest/samples/slow049.txt | 18 ++-- t/pt-query-digest/samples/slow050.txt | 8 +- t/pt-query-digest/samples/slow051.txt | 8 +- .../samples/slow052-apdex-t-0.1.txt | 66 -------------- t/pt-query-digest/samples/slow052.txt | 12 +-- t/pt-query-digest/samples/slow053.txt | 6 +- t/pt-query-digest/samples/slow054.txt | 2 +- t/pt-query-digest/samples/slow055.txt | 2 +- t/pt-query-digest/samples/slow056.txt | 4 +- t/pt-query-digest/samples/tcpdump001.txt | 2 +- .../samples/tcpdump002_report.txt | 8 +- t/pt-query-digest/samples/tcpdump003.txt | 2 +- t/pt-query-digest/samples/tcpdump012.txt | 2 +- .../samples/tcpdump017_report.txt | 8 +- t/pt-query-digest/samples/tcpdump021.txt | 6 +- t/pt-query-digest/samples/tcpdump022.txt | 4 +- t/pt-query-digest/samples/tcpdump023.txt | 4 +- t/pt-query-digest/samples/tcpdump024.txt | 4 +- t/pt-query-digest/samples/tcpdump025.txt | 4 +- t/pt-query-digest/samples/tcpdump033.txt | 16 ++-- t/pt-query-digest/samples/tcpdump041.txt | 8 +- t/pt-query-digest/slowlog_analyses.t | 9 -- 119 files changed, 439 insertions(+), 604 deletions(-) delete mode 100644 t/pt-query-digest/samples/slow052-apdex-t-0.1.txt diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 386db7ba..b09e5272 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -5444,15 +5444,6 @@ sub calculate_statistical_metrics { $classes->{$class}->{$attrib}->{all}, $classes->{$class}->{$attrib} ); - - if ( $args{apdex_t} && $attrib eq 'Query_time' ) { - $class_metrics->{$class}->{$attrib}->{apdex_t} = $args{apdex_t}; - $class_metrics->{$class}->{$attrib}->{apdex} - = $self->calculate_apdex( - t => $args{apdex_t}, - samples => $classes->{$class}->{$attrib}->{all}, - ); - } } } } @@ -5577,9 +5568,6 @@ sub metrics { median => $metrics->{classes}->{$where}->{$attrib}->{median} || 0, pct_95 => $metrics->{classes}->{$where}->{$attrib}->{pct_95} || 0, stddev => $metrics->{classes}->{$where}->{$attrib}->{stddev} || 0, - - apdex_t => 
$metrics->{classes}->{$where}->{$attrib}->{apdex_t}, - apdex => $metrics->{classes}->{$where}->{$attrib}->{apdex}, }; } @@ -5895,51 +5883,6 @@ sub _deep_copy_attrib_vals { return $copy; } -sub calculate_apdex { - my ( $self, %args ) = @_; - my @required_args = qw(t samples); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my ($t, $samples) = @args{@required_args}; - - if ( $t <= 0 ) { - die "Invalid target threshold (T): $t. T must be greater than zero"; - } - - my $f = 4 * $t; - PTDEBUG && _d("Apdex T =", $t, "F =", $f); - - my $satisfied = 0; - my $tolerating = 0; - my $frustrated = 0; # just for debug output - my $n_samples = 0; - BUCKET: - for my $bucket ( keys %$samples ) { - my $n_responses = $samples->{$bucket}; - my $response_time = $buck_vals[$bucket]; - - if ( $response_time <= $t ) { - $satisfied += $n_responses; - } - elsif ( $response_time <= $f ) { - $tolerating += $n_responses; - } - else { - $frustrated += $n_responses; - } - - $n_samples += $n_responses; - } - - my $apdex = sprintf('%.2f', ($satisfied + ($tolerating / 2)) / $n_samples); - PTDEBUG && _d($n_samples, "samples,", $satisfied, "satisfied,", - $tolerating, "tolerating,", $frustrated, "frustrated, Apdex score:", - $apdex); - - return $apdex; -} - sub _get_value { my ( $self, %args ) = @_; my ($event, $attrib, $alts) = @args{qw(event attribute alternates)}; @@ -6751,10 +6694,7 @@ sub event_report { { my $query_time = $ea->metrics(where => $item, attrib => 'Query_time'); push @result, - sprintf("# Scores: Apdex = %s [%3.1f]%s, V/M = %.2f", - (defined $query_time->{apdex} ? "$query_time->{apdex}" : "NS"), - ($query_time->{apdex_t} || 0), - ($query_time->{cnt} < 100 ? "*" : ""), + sprintf("# Scores: V/M = %.2f", ($query_time->{stddev}**2 / ($query_time->{avg} || 1)), ); } @@ -7017,7 +6957,6 @@ sub profile { $qr->distill($samp_query, %{$args{distill_args}}) : $item, id => $groupby eq 'fingerprint' ? make_checksum($item) : '', vmr => ($query_time->{stddev}**2) / ($query_time->{avg} || 1), - apdex => defined $query_time->{apdex} ? $query_time->{apdex} : "NS", ); if ( $o->get('explain') && $samp_query ) { @@ -7048,7 +6987,6 @@ sub profile { { name => 'Response time', right_justify => 1, }, { name => 'Calls', right_justify => 1, }, { name => 'R/Call', right_justify => 1, }, - { name => 'Apdx', right_justify => 1, width => 4, }, { name => 'V/M', right_justify => 1, width => 5, }, ( $o->get('explain') ? { name => 'EXPLAIN' } : () ), { name => 'Item', }, @@ -7066,7 +7004,6 @@ sub profile { "$rt $rtp", $item->{cnt}, $rc, - $item->{apdex}, $vmr, ( $o->get('explain') ? $item->{explain_sparkline} || "" : () ), $item->{sample}, @@ -7094,7 +7031,6 @@ sub profile { "$rt $rtp", $misc->{cnt}, $rc, - 'NS', # Apdex is not meaningful here '0.0', # variance-to-mean ratio is not meaningful here ( $o->get('explain') ? "MISC" : () ), "<".scalar @$other." ITEMS>", @@ -13223,9 +13159,6 @@ sub main { } } - if ( $o->get('apdex-threshold') <= 0 ) { - $o->save_error("Apdex threshold must be a positive decimal value"); - } if ( my $patterns = $o->get('embedded-attributes') ) { $o->save_error("--embedded-attributes should be passed two " . "comma-separated patterns, got " . 
scalar(@$patterns) ) @@ -14389,9 +14322,7 @@ sub print_reports { for my $i ( 0..$#groupby ) { if ( $o->get('report') || $qv ) { - $eas->[$i]->calculate_statistical_metrics( - apdex_t => $o->get('apdex-threshold'), - ); + $eas->[$i]->calculate_statistical_metrics(); } my ($orderby_attrib, $orderby_func) = split(/:/, $orderby[$i]); @@ -14997,7 +14928,6 @@ that follows. It contains the following columns: Response time The total response time, and percentage of overall total Calls The number of times this query was executed R/Call The mean response time per execution - Apdx The Apdex score; see --apdex-threshold for details V/M The Variance-to-mean ratio of response time EXPLAIN If --explain was specified, a sparkline; see --explain Item The distilled query @@ -15297,19 +15227,6 @@ L<"SYNOPSIS"> and usage information for details. =over -=item --apdex-threshold - -type: float; default: 1.0 - -Set Apdex target threshold (T) for query response time. The Application -Performance Index (Apdex) Technical Specification V1.1 defines T as "a -positive decimal value in seconds, having no more than two significant digits -of granularity." This value only applies to query response time (Query_time). - -Options can be abbreviated so specifying C<--apdex-t> also works. - -See L. - =item --ask-pass Prompt for a password when connecting to MySQL. diff --git a/lib/QueryReportFormatter.pm b/lib/QueryReportFormatter.pm index 95eff7c2..c0340fb8 100644 --- a/lib/QueryReportFormatter.pm +++ b/lib/QueryReportFormatter.pm @@ -542,15 +542,12 @@ sub event_report { . ($args{reason} eq 'top' ? '--limit.' : '--outliers.'); } - # Third line: Apdex and variance-to-mean (V/M) ratio, like: - # Scores: Apdex = 0.93 [1.0], V/M = 1.5 + # Third line: Variance-to-mean (V/M) ratio, like: + # Scores: V/M = 1.5 { my $query_time = $ea->metrics(where => $item, attrib => 'Query_time'); push @result, - sprintf("# Scores: Apdex = %s [%3.1f]%s, V/M = %.2f", - (defined $query_time->{apdex} ? "$query_time->{apdex}" : "NS"), - ($query_time->{apdex_t} || 0), - ($query_time->{cnt} < 100 ? "*" : ""), + sprintf("# Scores: V/M = %.2f", ($query_time->{stddev}**2 / ($query_time->{avg} || 1)), ); } @@ -874,7 +871,6 @@ sub profile { $qr->distill($samp_query, %{$args{distill_args}}) : $item, id => $groupby eq 'fingerprint' ? make_checksum($item) : '', vmr => ($query_time->{stddev}**2) / ($query_time->{avg} || 1), - apdex => defined $query_time->{apdex} ? $query_time->{apdex} : "NS", ); # Get EXPLAIN sparkline if --explain. @@ -906,7 +902,6 @@ sub profile { { name => 'Response time', right_justify => 1, }, { name => 'Calls', right_justify => 1, }, { name => 'R/Call', right_justify => 1, }, - { name => 'Apdx', right_justify => 1, width => 4, }, { name => 'V/M', right_justify => 1, width => 5, }, ( $o->get('explain') ? { name => 'EXPLAIN' } : () ), { name => 'Item', }, @@ -924,7 +919,6 @@ sub profile { "$rt $rtp", $item->{cnt}, $rc, - $item->{apdex}, $vmr, ( $o->get('explain') ? $item->{explain_sparkline} || "" : () ), $item->{sample}, @@ -954,7 +948,6 @@ sub profile { "$rt $rtp", $misc->{cnt}, $rc, - 'NS', # Apdex is not meaningful here '0.0', # variance-to-mean ratio is not meaningful here ( $o->get('explain') ? "MISC" : () ), "<".scalar @$other." 
ITEMS>", diff --git a/t/lib/samples/QueryReportFormatter/report001.txt b/t/lib/samples/QueryReportFormatter/report001.txt index 304abd51..78c30c81 100644 --- a/t/lib/samples/QueryReportFormatter/report001.txt +++ b/t/lib/samples/QueryReportFormatter/report001.txt @@ -7,7 +7,7 @@ # Lock time 1ms 1ms 1ms 1ms 1ms 0 1ms # Query 1: 0 QPS, 0x concurrency, ID 0x5796997451B1FA1D at byte 123 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median @@ -35,6 +35,6 @@ select col from tbl where id=42\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5796997451B1FA1D 1.0007 100.0% 1 1.0007 1.00 0.00 SELECT tbl +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x5796997451B1FA1D 1.0007 100.0% 1 1.0007 0.00 SELECT tbl diff --git a/t/lib/samples/QueryReportFormatter/report002.txt b/t/lib/samples/QueryReportFormatter/report002.txt index fbaedf8b..c2c2710f 100644 --- a/t/lib/samples/QueryReportFormatter/report002.txt +++ b/t/lib/samples/QueryReportFormatter/report002.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x3F79759E7FA2F117 at byte 1106 _____ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637892 # Attribute pct total min max avg 95% stddev median @@ -31,7 +31,7 @@ EXECUTE SELECT i FROM d.t WHERE i="3"\G SELECT i FROM d.t WHERE i="3"\G # Query 2: 0 QPS, 0x concurrency, ID 0xAA8E9FA785927259 at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637394 # Attribute pct total min max avg 95% stddev median diff --git a/t/lib/samples/QueryReportFormatter/report003.txt b/t/lib/samples/QueryReportFormatter/report003.txt index 9e92376b..58aa0a53 100644 --- a/t/lib/samples/QueryReportFormatter/report003.txt +++ b/t/lib/samples/QueryReportFormatter/report003.txt @@ -1,11 +1,11 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5796997451B1FA1D 1.0007 100.0% 1 1.0007 1.00 0.00 SELECT tbl +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x5796997451B1FA1D 1.0007 100.0% 1 1.0007 0.00 SELECT tbl # Query 1: 0 QPS, 0x concurrency, ID 0x5796997451B1FA1D at byte 123 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median diff --git a/t/lib/samples/QueryReportFormatter/report004.txt b/t/lib/samples/QueryReportFormatter/report004.txt index 39e50b90..fadc6838 100644 --- a/t/lib/samples/QueryReportFormatter/report004.txt +++ b/t/lib/samples/QueryReportFormatter/report004.txt @@ -1,6 +1,6 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAECF4CA2310AC9E2 1.0303 97.1% 1 1.0303 NS 0.00 UPDATE foo -# MISC 0xMISC 0.0306 2.9% 2 0.0153 NS 0.0 <2 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= 
===== ====== ===== ========== +# 1 0xAECF4CA2310AC9E2 1.0303 97.1% 1 1.0303 0.00 UPDATE foo +# MISC 0xMISC 0.0306 2.9% 2 0.0153 0.0 <2 ITEMS> diff --git a/t/lib/samples/QueryReportFormatter/report005.txt b/t/lib/samples/QueryReportFormatter/report005.txt index aea3787c..502c32b9 100644 --- a/t/lib/samples/QueryReportFormatter/report005.txt +++ b/t/lib/samples/QueryReportFormatter/report005.txt @@ -1,5 +1,5 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============== ===== ====== ==== ===== ======== -# 1 0xCB5621E548E5497F 17.5000 100.0% 4 4.3750 NS 2.23 SELECT t +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============== ===== ====== ===== ======== +# 1 0xCB5621E548E5497F 17.5000 100.0% 4 4.3750 2.23 SELECT t diff --git a/t/lib/samples/QueryReportFormatter/report007.txt b/t/lib/samples/QueryReportFormatter/report007.txt index 3ce434fa..932ec60c 100644 --- a/t/lib/samples/QueryReportFormatter/report007.txt +++ b/t/lib/samples/QueryReportFormatter/report007.txt @@ -1,6 +1,6 @@ # Query 1: 2 QPS, 9.00x concurrency, ID 0x82860EDA9A88FCC5 at byte 1 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 5.44 +# Scores: V/M = 5.44 # Time range: 2007-10-15 21:43:52 to 21:43:53 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report009.txt b/t/lib/samples/QueryReportFormatter/report009.txt index dcd22b37..e208c406 100644 --- a/t/lib/samples/QueryReportFormatter/report009.txt +++ b/t/lib/samples/QueryReportFormatter/report009.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Time range: all events occurred at 2009-04-12 11:00:13.118191 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report010.txt b/t/lib/samples/QueryReportFormatter/report010.txt index 6dcbd45c..319343f5 100644 --- a/t/lib/samples/QueryReportFormatter/report010.txt +++ b/t/lib/samples/QueryReportFormatter/report010.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x82860EDA9A88FCC5 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report011.txt b/t/lib/samples/QueryReportFormatter/report011.txt index 42508fc8..02d54b0c 100644 --- a/t/lib/samples/QueryReportFormatter/report011.txt +++ b/t/lib/samples/QueryReportFormatter/report011.txt @@ -1,6 +1,6 @@ # Query 1: 0.67 QPS, 1x concurrency, ID 0x82860EDA9A88FCC5 at byte 0 _____ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = NS [0.0]*, V/M = 0.33 +# Scores: V/M = 0.33 # Time range: 2007-10-15 21:43:52 to 21:43:55 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report012.txt b/t/lib/samples/QueryReportFormatter/report012.txt index 31943a57..330f8d68 100644 --- a/t/lib/samples/QueryReportFormatter/report012.txt +++ b/t/lib/samples/QueryReportFormatter/report012.txt @@ -1,6 +1,6 @@ # Query 1: 1 QPS, 2x concurrency, ID 0x82860EDA9A88FCC5 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = NS [0.0]*, V/M = 0.30 +# Scores: V/M = 0.30 # Time range: 2007-10-15 21:43:52 to 21:43:55 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report013.txt b/t/lib/samples/QueryReportFormatter/report013.txt index 1ef0919d..3ff99d58 100644 --- a/t/lib/samples/QueryReportFormatter/report013.txt +++ b/t/lib/samples/QueryReportFormatter/report013.txt @@ -1,5 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 2 diff --git a/t/lib/samples/QueryReportFormatter/report014.txt b/t/lib/samples/QueryReportFormatter/report014.txt index b0fc35ed..782b0010 100644 --- a/t/lib/samples/QueryReportFormatter/report014.txt +++ b/t/lib/samples/QueryReportFormatter/report014.txt @@ -1,5 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 3 diff --git a/t/lib/samples/QueryReportFormatter/report015.txt b/t/lib/samples/QueryReportFormatter/report015.txt index 04949587..f38ae401 100644 --- a/t/lib/samples/QueryReportFormatter/report015.txt +++ b/t/lib/samples/QueryReportFormatter/report015.txt @@ -1,5 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report016.txt b/t/lib/samples/QueryReportFormatter/report016.txt index b443426a..3a6fe8bc 100644 --- a/t/lib/samples/QueryReportFormatter/report016.txt +++ b/t/lib/samples/QueryReportFormatter/report016.txt @@ -1,5 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report024.txt b/t/lib/samples/QueryReportFormatter/report024.txt index da2a0d3d..2b1b7d80 100644 --- a/t/lib/samples/QueryReportFormatter/report024.txt +++ b/t/lib/samples/QueryReportFormatter/report024.txt @@ -1,5 +1,5 @@ # Query 0: 0 QPS, 0x concurrency, ID 0x82860EDA9A88FCC5 at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Time range: all events 
occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report028.txt b/t/lib/samples/QueryReportFormatter/report028.txt index 3640f6c1..eeba3c53 100644 --- a/t/lib/samples/QueryReportFormatter/report028.txt +++ b/t/lib/samples/QueryReportFormatter/report028.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xFDE00DF974C61E9F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.62 [1.0]*, V/M = 17.71 +# Scores: V/M = 17.71 # Query_time sparkline: |^^ ^^| # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report032.txt b/t/lib/samples/QueryReportFormatter/report032.txt index d792a3b2..fc82f664 100644 --- a/t/lib/samples/QueryReportFormatter/report032.txt +++ b/t/lib/samples/QueryReportFormatter/report032.txt @@ -1,12 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ========= -# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 NS 0.00 ia SELECT t -# MISC 0xMISC 0.0003 100.0% 1 0.0003 NS 0.0 MISC <1 ITEMS> +# Rank Query ID Response time Calls R/Call V/M EXPLAIN Item +# ==== ================== ============= ===== ====== ===== ======= ========= +# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 0.00 ia SELECT t +# MISC 0xMISC 0.0003 100.0% 1 0.0003 0.0 MISC <1 ITEMS> # Query 1: 0 QPS, 0x concurrency, ID 0x46F81B022F1AD76B at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # EXPLAIN sparkline: ia # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637394 diff --git a/t/pt-query-digest/samples/binlog001.txt b/t/pt-query-digest/samples/binlog001.txt index b6acca7b..e17a903e 100644 --- a/t/pt-query-digest/samples/binlog001.txt +++ b/t/pt-query-digest/samples/binlog001.txt @@ -15,7 +15,7 @@ # error code 0 0 0 0 0 0 0 # Query 1: 0 QPS, 0x concurrency, ID 0xCD948EAF18BC614E at byte 953 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:08 # Attribute pct total min max avg 95% stddev median @@ -52,7 +52,7 @@ replace into test4.tbl9(tbl5, day, todo, comment) and o.col3 >= date_sub(current_date, interval 30 day)\G # Query 2: 0 QPS, 0x concurrency, ID 0xC356FD9EFD7D799E at byte 605 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:07 # Attribute pct total min max avg 95% stddev median @@ -89,7 +89,7 @@ select e.tblo = o.tblo, inner join test3.tbl2 as e on o.animal = e.animal and o.oid = e.oid where e.tblo is null\G # Query 3: 0 QPS, 0x concurrency, ID 0xB5E55291C7DE1096 at byte 1469 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:50 # Attribute pct total min max avg 95% stddev median @@ -126,7 +126,7 @@ select o.tbl2 = e.tbl2, on o.animal = e.animal and o.oid = e.oid where o.tbl2 is null\G # Query 4: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 146 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 
12:02:50 # Attribute pct total min max avg 95% stddev median @@ -156,7 +156,7 @@ select o.tbl2 = e.tbl2, BEGIN\G # Query 5: 0 QPS, 0x concurrency, ID 0xED69B13F3D0161D0 at byte 2479 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:53 # Attribute pct total min max avg 95% stddev median @@ -191,7 +191,7 @@ select last2metric1 = last1metric1, last2time = last1time, last0metric1 = ondeckmetric1, last0time = now() from test2.tbl8 where tbl8 in (10800712)\G # Query 6: 0 QPS, 0x concurrency, ID 0x79BFEA84D0CED05F at byte 1889 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:53 # Attribute pct total min max avg 95% stddev median @@ -222,11 +222,11 @@ insert into test1.tbl6 metric12 = metric12 + values(metric12), secs = secs + values(secs)\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Ite -# ==== ================== ================ ===== ========== ==== ===== === -# 1 0xCD948EAF18BC614E 20704.0000 16.7% 1 20704.0000 0.00 0.00 REPLACE SELECT test?.tbl? test?.tblo test?.tbl? -# 2 0xC356FD9EFD7D799E 20675.0000 16.7% 1 20675.0000 0.00 0.00 UPDATE test?.tblo test?.tbl? -# 3 0xB5E55291C7DE1096 20664.0000 16.7% 1 20664.0000 0.00 0.00 UPDATE test?.tblo test?.tbl? -# 4 0x85FFF5AA78E5FF6A 20664.0000 16.7% 1 20664.0000 0.00 0.00 BEGIN -# 5 0xED69B13F3D0161D0 20661.0000 16.7% 1 20661.0000 0.00 0.00 UPDATE test?.tbl? -# 6 0x79BFEA84D0CED05F 20661.0000 16.7% 1 20661.0000 0.00 0.00 INSERT UPDATE test?.tbl? +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ================ ===== ========== ===== ======== +# 1 0xCD948EAF18BC614E 20704.0000 16.7% 1 20704.0000 0.00 REPLACE SELECT test?.tbl? test?.tblo test?.tbl? +# 2 0xC356FD9EFD7D799E 20675.0000 16.7% 1 20675.0000 0.00 UPDATE test?.tblo test?.tbl? +# 3 0xB5E55291C7DE1096 20664.0000 16.7% 1 20664.0000 0.00 UPDATE test?.tblo test?.tbl? +# 4 0x85FFF5AA78E5FF6A 20664.0000 16.7% 1 20664.0000 0.00 BEGIN +# 5 0xED69B13F3D0161D0 20661.0000 16.7% 1 20661.0000 0.00 UPDATE test?.tbl? +# 6 0x79BFEA84D0CED05F 20661.0000 16.7% 1 20661.0000 0.00 INSERT UPDATE test?.tbl? diff --git a/t/pt-query-digest/samples/binlog002.txt b/t/pt-query-digest/samples/binlog002.txt index 076171f9..c5edf880 100644 --- a/t/pt-query-digest/samples/binlog002.txt +++ b/t/pt-query-digest/samples/binlog002.txt @@ -16,7 +16,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xF25D6D5AC7C18FF3 at byte 381 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2009-07-22 07:21:59 # Attribute pct total min max avg 95% stddev median @@ -45,7 +45,7 @@ create database d\G # Query 2: 0 QPS, 0x concurrency, ID 0x03409022EB8A4AE7 at byte 795 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2009-07-22 07:22:16 # Attribute pct total min max avg 95% stddev median @@ -72,7 +72,7 @@ create table foo (i int)\G # Query 3: 0 QPS, 0x concurrency, ID 0xF579EC4A9633EEA0 at byte 973 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2009-07-22 07:22:24 # Attribute pct total min max avg 95% stddev median @@ -95,8 +95,8 @@ create table foo (i int)\G insert foo values (1) /*... omitted ...*/\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xF25D6D5AC7C18FF3 0.0000 0.0% 1 0.0000 1.00 0.00 CREATE DATABASE d -# 2 0x03409022EB8A4AE7 0.0000 0.0% 1 0.0000 1.00 0.00 CREATE TABLE foo -# 3 0xF579EC4A9633EEA0 0.0000 0.0% 1 0.0000 1.00 0.00 INSERT +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0xF25D6D5AC7C18FF3 0.0000 0.0% 1 0.0000 0.00 CREATE DATABASE d +# 2 0x03409022EB8A4AE7 0.0000 0.0% 1 0.0000 0.00 CREATE TABLE foo +# 3 0xF579EC4A9633EEA0 0.0000 0.0% 1 0.0000 0.00 INSERT diff --git a/t/pt-query-digest/samples/cannot-distill-profile.txt b/t/pt-query-digest/samples/cannot-distill-profile.txt index de462a62..60a42713 100644 --- a/t/pt-query-digest/samples/cannot-distill-profile.txt +++ b/t/pt-query-digest/samples/cannot-distill-profile.txt @@ -1,6 +1,6 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5B721CAE3EDDB56B 0.0900 69.1% 1 0.0900 1.00 0.00 -# 2 0xBE90A42C0FB7E89E 0.0403 30.9% 1 0.0403 1.00 0.00 +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =========== +# 1 0x5B721CAE3EDDB56B 0.0900 69.1% 1 0.0900 0.00 +# 2 0xBE90A42C0FB7E89E 0.0403 30.9% 1 0.0403 0.00 diff --git a/t/pt-query-digest/samples/genlog001.txt b/t/pt-query-digest/samples/genlog001.txt index a08e68e9..1c0419c9 100644 --- a/t/pt-query-digest/samples/genlog001.txt +++ b/t/pt-query-digest/samples/genlog001.txt @@ -7,7 +7,7 @@ # Query size 315 27 124 45 118.34 31.33 28.75 # Query 1: 0.00 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 244 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: 2005-10-07 21:55:24 to 2006-12-26 15:42:36 # Attribute pct total min max avg 95% stddev median @@ -31,7 +31,7 @@ administrator command: Connect\G # Query 2: 0.00 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 464 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: 2005-10-07 21:55:24 to 2006-12-26 16:44:48 # Attribute pct total min max avg 95% stddev median @@ -53,7 +53,7 @@ administrator command: Connect\G administrator command: Quit\G # Query 3: 0 QPS, 0x concurrency, ID 0x4D096479916B0F45 at byte 346 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2006-12-26 15:42:36 # Attribute pct total min max avg 95% stddev median @@ -79,7 +79,7 @@ administrator command: Quit\G SELECT DISTINCT col FROM tbl WHERE foo=20061219\G # Query 4: 0 QPS, 0x concurrency, ID 0x44AAC79F41BCF692 at byte 58 _______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median @@ -108,7 +108,7 @@ SELECT foo ORDER BY col\G # Query 5: 0 QPS, 0x concurrency, ID 0x44AE35A182869033 at byte 300 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time 
sparkline: | | # Time range: all events occurred at 2006-12-26 15:42:36 # Attribute pct total min max avg 95% stddev median @@ -130,10 +130,10 @@ SELECT foo administrator command: Init DB\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5D51E5F01B88B79E 0.0000 0.0% 2 0.0000 1.00 0.00 ADMIN CONNECT -# 2 0xAA353644DE4C4CB4 0.0000 0.0% 2 0.0000 1.00 0.00 ADMIN QUIT -# 3 0x4D096479916B0F45 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT tbl -# 4 0x44AAC79F41BCF692 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT tbl -# 5 0x44AE35A182869033 0.0000 0.0% 1 0.0000 1.00 0.00 ADMIN INIT DB +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ============= +# 1 0x5D51E5F01B88B79E 0.0000 0.0% 2 0.0000 0.00 ADMIN CONNECT +# 2 0xAA353644DE4C4CB4 0.0000 0.0% 2 0.0000 0.00 ADMIN QUIT +# 3 0x4D096479916B0F45 0.0000 0.0% 1 0.0000 0.00 SELECT tbl +# 4 0x44AAC79F41BCF692 0.0000 0.0% 1 0.0000 0.00 SELECT tbl +# 5 0x44AE35A182869033 0.0000 0.0% 1 0.0000 0.00 ADMIN INIT DB diff --git a/t/pt-query-digest/samples/genlog002.txt b/t/pt-query-digest/samples/genlog002.txt index 5afe0258..ca1b392f 100644 --- a/t/pt-query-digest/samples/genlog002.txt +++ b/t/pt-query-digest/samples/genlog002.txt @@ -8,7 +8,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x2361B36A4AEB397B at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2010-02-11 00:55:24 # Attribute pct total min max avg 95% stddev median @@ -35,7 +35,7 @@ SELECT category_id # Query 2: 0 QPS, 0x concurrency, ID 0x0A3E6DCD23F3445A at byte 237 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2010-02-11 00:55:24 # Attribute pct total min max avg 95% stddev median @@ -71,7 +71,7 @@ SELECT auction_id, auction_title_en AS title, close_time, LIMIT 500\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x2361B36A4AEB397B 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT auction_category_map -# 2 0x0A3E6DCD23F3445A 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT auction_search +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x2361B36A4AEB397B 0.0000 0.0% 1 0.0000 0.00 SELECT auction_category_map +# 2 0x0A3E6DCD23F3445A 0.0000 0.0% 1 0.0000 0.00 SELECT auction_search diff --git a/t/pt-query-digest/samples/genlog003.txt b/t/pt-query-digest/samples/genlog003.txt index 5330867f..c1c408e9 100644 --- a/t/pt-query-digest/samples/genlog003.txt +++ b/t/pt-query-digest/samples/genlog003.txt @@ -7,7 +7,7 @@ # Query size 315 27 124 45 118.34 31.33 28.75 # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 246 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median @@ -31,7 +31,7 @@ administrator command: Connect\G # Query 2: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 466 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median @@ -53,7 +53,7 @@ administrator command: Connect\G administrator command: Quit\G # Query 3: 0 QPS, 0x concurrency, ID 0x4D096479916B0F45 at byte 348 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median @@ -79,7 +79,7 @@ administrator command: Quit\G SELECT DISTINCT col FROM tbl WHERE foo=20061219\G # Query 4: 0 QPS, 0x concurrency, ID 0x44AAC79F41BCF692 at byte 60 _______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median @@ -108,7 +108,7 @@ SELECT foo ORDER BY col\G # Query 5: 0 QPS, 0x concurrency, ID 0x44AE35A182869033 at byte 302 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median @@ -130,10 +130,10 @@ SELECT foo administrator command: Init DB\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x5D51E5F01B88B79E 0.0000 0.0% 2 0.0000 1.00 0.00 ADMIN CONNECT -# 2 0xAA353644DE4C4CB4 0.0000 0.0% 2 0.0000 1.00 0.00 ADMIN QUIT -# 3 0x4D096479916B0F45 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT tbl -# 4 0x44AAC79F41BCF692 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT tbl -# 5 0x44AE35A182869033 0.0000 0.0% 1 0.0000 1.00 0.00 ADMIN INIT DB +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ============= +# 1 0x5D51E5F01B88B79E 0.0000 0.0% 2 0.0000 0.00 ADMIN CONNECT +# 
2 0xAA353644DE4C4CB4 0.0000 0.0% 2 0.0000 0.00 ADMIN QUIT +# 3 0x4D096479916B0F45 0.0000 0.0% 1 0.0000 0.00 SELECT tbl +# 4 0x44AAC79F41BCF692 0.0000 0.0% 1 0.0000 0.00 SELECT tbl +# 5 0x44AE35A182869033 0.0000 0.0% 1 0.0000 0.00 ADMIN INIT DB diff --git a/t/pt-query-digest/samples/http_tcpdump002.txt b/t/pt-query-digest/samples/http_tcpdump002.txt index 4d48f8a7..5264d58a 100644 --- a/t/pt-query-digest/samples/http_tcpdump002.txt +++ b/t/pt-query-digest/samples/http_tcpdump002.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xFB0C089DD4451762 at byte 59213 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.411349 # Attribute pct total min max avg 95% stddev median @@ -25,7 +25,7 @@ get www.percona.com/images/menu_our-vision.gif # Query 2: 0 QPS, 0x concurrency, ID 0x7C3AA9143C98C14A at byte 206 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.074855 # Attribute pct total min max avg 95% stddev median @@ -50,7 +50,7 @@ get www.percona.com/images/menu_our-vision.gif get www.percona.com/about-us.html # Query 3: 0 QPS, 0x concurrency, ID 0x7CC09CE55CB7750C at byte 16362 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.157215 # Attribute pct total min max avg 95% stddev median @@ -75,7 +75,7 @@ get www.percona.com/about-us.html get www.percona.com/js/jquery.js # Query 4: 0 QPS, 0x concurrency, ID 0x44C0C94594575296 at byte 65644 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.420851 # Attribute pct total min max avg 95% stddev median @@ -100,7 +100,7 @@ get www.percona.com/js/jquery.js get www.percona.com/images/bg-gray-corner-top.gif # Query 5: 0 QPS, 0x concurrency, ID 0x08207FBDE8A42C36 at byte 67956 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.420996 # Attribute pct total min max avg 95% stddev median @@ -125,7 +125,7 @@ get www.percona.com/images/bg-gray-corner-top.gif get www.percona.com/images/handshake.jpg # Query 6: 0 QPS, 0x concurrency, ID 0x4F1E2B5E822F55B8 at byte 53100 ____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.346763 # Attribute pct total min max avg 95% stddev median @@ -150,7 +150,7 @@ get www.percona.com/images/handshake.jpg get www.percona.com/images/menu_team.gif # Query 7: 0 QPS, 0x concurrency, ID 0x7FB624EE10D71E1F at byte 170117 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:14.737890 # Attribute pct total min max avg 95% stddev median @@ -175,7 +175,7 @@ get www.percona.com/images/menu_team.gif get hit.clickaider.com/s/forms.js # Query 8: 0 QPS, 0x concurrency, ID 0x1279DE4968C95A8D at byte 147447 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:14.536149 # Attribute pct total min max avg 95% stddev median @@ -200,7 +200,7 @@ get hit.clickaider.com/s/forms.js get hit.clickaider.com/clickaider.js # Query 9: 0 
QPS, 0x concurrency, ID 0x590BE2A84B8F0D5B at byte 167245 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:14.678713 # Attribute pct total min max avg 95% stddev median @@ -225,7 +225,7 @@ get hit.clickaider.com/clickaider.js get hit.clickaider.com/pv?lng=140&&lnks=&t=About%20Percona&c=73a41b95-2926&r=http%3A%2F%2Fwww.percona.com%2F&tz=-420&loc=http%3A%2F%2Fwww.percona.com%2Fabout-us.html&rnd=3688 # Query 10: 0 QPS, 0x concurrency, ID 0xFC5C4A690D695F35 at byte 55942 ___ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.373800 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/issue_1196-output-5.6.txt b/t/pt-query-digest/samples/issue_1196-output-5.6.txt index bd69285f..945df0d5 100644 --- a/t/pt-query-digest/samples/issue_1196-output-5.6.txt +++ b/t/pt-query-digest/samples/issue_1196-output-5.6.txt @@ -1,12 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ======== -# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 1.00 0.00 TF>aa SELECT t +# Rank Query ID Response time Calls R/Call V/M EXPLAIN Item +# ==== ================== ============= ===== ====== ===== ======= ======== +# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 0.00 TF>aa SELECT t # Query 1: 0 QPS, 0x concurrency, ID 0xD4B6A5CD2F2F485C at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # EXPLAIN sparkline: TF>aa # Query_time sparkline: | ^ | # Time range: all events occurred at 2010-12-14 16:12:28 diff --git a/t/pt-query-digest/samples/memc_tcpdump001.txt b/t/pt-query-digest/samples/memc_tcpdump001.txt index d8966641..ea59f953 100644 --- a/t/pt-query-digest/samples/memc_tcpdump001.txt +++ b/t/pt-query-digest/samples/memc_tcpdump001.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x26193ADA9E14A97E at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-04 21:33:39.229179 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump002.txt b/t/pt-query-digest/samples/memc_tcpdump002.txt index c94b74b9..6f4e0216 100644 --- a/t/pt-query-digest/samples/memc_tcpdump002.txt +++ b/t/pt-query-digest/samples/memc_tcpdump002.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x456F2F160AF2DC0F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-04 22:12:06.174390 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump003.txt b/t/pt-query-digest/samples/memc_tcpdump003.txt index 5b950344..a32419f9 100644 --- a/t/pt-query-digest/samples/memc_tcpdump003.txt +++ b/t/pt-query-digest/samples/memc_tcpdump003.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAEBF67014CC9A7C0 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-04 22:12:06.175734 # Attribute pct total min max avg 95% stddev median @@ -27,7 +27,7 @@ incr key # Query 2: 0 QPS, 0x concurrency, ID 0xC03129972E1D6A1F at byte 522 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-04 22:12:06.176181 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt b/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt index a04d0331..c0fea77a 100644 --- a/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt +++ b/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt @@ -5,7 +5,7 @@ # Item 1: 4.47k QPS, 0.32x concurrency, ID 0x8228B9A98CA1531D at byte 0 __ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-04 22:12:06.175734 to 22:12:06.176181 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump004.txt b/t/pt-query-digest/samples/memc_tcpdump004.txt index 27acc028..cc9d2cfd 100644 --- a/t/pt-query-digest/samples/memc_tcpdump004.txt +++ b/t/pt-query-digest/samples/memc_tcpdump004.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAEBF67014CC9A7C0 at byte 764 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-06 10:37:21.668469 # Attribute pct total min max avg 95% stddev median @@ -28,7 +28,7 @@ incr key # Query 2: 0 QPS, 0x concurrency, ID 0xC03129972E1D6A1F at byte 1788 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-06 10:37:21.668851 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump005.txt b/t/pt-query-digest/samples/memc_tcpdump005.txt index da7a20c0..3bbceb2f 100644 --- a/t/pt-query-digest/samples/memc_tcpdump005.txt +++ b/t/pt-query-digest/samples/memc_tcpdump005.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x26193ADA9E14A97E at byte 764 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-06 22:07:14.406827 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump006.txt b/t/pt-query-digest/samples/memc_tcpdump006.txt index 988b8688..2d70b633 100644 --- a/t/pt-query-digest/samples/memc_tcpdump006.txt +++ b/t/pt-query-digest/samples/memc_tcpdump006.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x456F2F160AF2DC0F at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-06 22:07:14.411331 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump007.txt b/t/pt-query-digest/samples/memc_tcpdump007.txt index 93ecfa72..50484355 100644 --- a/t/pt-query-digest/samples/memc_tcpdump007.txt +++ b/t/pt-query-digest/samples/memc_tcpdump007.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x28C64E8A71EEAEAF at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-06-11 21:54:49.059144 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump008.txt b/t/pt-query-digest/samples/memc_tcpdump008.txt index 805de0b6..0dd87d4a 100644 --- a/t/pt-query-digest/samples/memc_tcpdump008.txt +++ b/t/pt-query-digest/samples/memc_tcpdump008.txt @@ -1,7 +1,7 @@ # Query 1: 645.28k QPS, 1.29x concurrency, ID 0x456F2F160AF2DC0F at byte 0 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: |^ | # Time range: 2009-07-06 22:07:14.411331 to 22:07:14.411334 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump009.txt b/t/pt-query-digest/samples/memc_tcpdump009.txt index 44a7a7ae..3fd672fb 100644 --- a/t/pt-query-digest/samples/memc_tcpdump009.txt +++ b/t/pt-query-digest/samples/memc_tcpdump009.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x6A3331FD94A66F54 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-06-11 21:54:52.244534 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/memc_tcpdump010.txt b/t/pt-query-digest/samples/memc_tcpdump010.txt index 32f7de80..a6825f4c 100644 --- a/t/pt-query-digest/samples/memc_tcpdump010.txt +++ b/t/pt-query-digest/samples/memc_tcpdump010.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x3D1AED9A2A3A73C8 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-09 22:00:29.066476 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/pg-sample1 b/t/pt-query-digest/samples/pg-sample1 index 7339202e..d8030e05 100644 --- a/t/pt-query-digest/samples/pg-sample1 +++ b/t/pt-query-digest/samples/pg-sample1 @@ -1,25 +1,25 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x949BAEB72FDE23A2 1.4501 33.3% 16 0.0906 1.00 0.00 SELECT stats_cvs_group -# 2 0x8FFEBD609B778EB2 0.5053 11.6% 45 0.0112 1.00 0.10 INSERT activity_log -# 3 0x64F8E6F000640AF8 0.3750 8.6% 5 0.0750 1.00 0.00 SELECT users -# 4 0x22375E33FDA4E899 0.3705 8.5% 1 0.3705 1.00 0.00 SELECT ONLY OF -# 5 0x60D6962E42C08882 0.1020 2.3% 46 0.0022 1.00 0.00 SELECT plugins -# 6 0x32AF9886FDBBAE30 0.0981 2.3% 38 0.0026 1.00 0.00 SELECT frs_filetype frs_processor frs_file frs_dlstats_filetotal_agg -# 7 0x5E64B4F52EC23D71 0.0936 2.1% 17 0.0055 1.00 0.01 SELECT trove_cat trove_group_link -# 8 0x1929E67B76DC55E7 0.0877 2.0% 5 0.0175 1.00 0.00 SELECT frs_dlstats_grouptotal_vw groups -# 9 0x1451AE69DBB6E0F2 0.0780 1.8% 1 0.0780 1.00 0.00 SELECT users -# 10 0xD7884E7E471BB089 0.0722 1.7% 61 0.0012 1.00 0.00 SELECT forum_group_list_vw -# 11 0x9DBDF5FB59454957 0.0612 1.4% 5 0.0122 1.00 0.00 SELECT users news_bytes groups -# 12 0x834CC93BAA549DD4 0.0609 1.4% 17 0.0036 1.00 0.00 SELECT users user_group -# 13 0xEF691689ACF9DC59 0.0595 1.4% 10 0.0059 1.00 0.00 SELECT frs_package frs_release frs_file groups -# 14 0x10D09F1381004A22 0.0582 1.3% 17 0.0034 1.00 0.00 SELECT groups -# 15 0xCF439D1EC0933550 0.0579 1.3% 2 0.0290 1.00 0.04 SELECT pg_catalog.pg_class pg_catalog.pg_namespace -# 16 0x7D752C8A15925978 0.0544 1.2% 60 0.0009 1.00 0.00 BEGIN SELECT -# 17 0x82AEF03891943FB3 0.0514 1.2% 2 0.0257 1.00 0.03 SELECT forum_group_list_vw -# 18 0x9AA827C1DF73EE43 0.0496 1.1% 17 0.0029 1.00 0.00 SELECT users news_bytes groups -# 19 0x4636BFC0875521C9 0.0447 1.0% 40 0.0011 1.00 0.00 SELECT supported_languages -# 20 0xB1C777CE6EBFE87E 0.0434 1.0% 17 0.0026 1.00 0.00 SELECT frs_package frs_release -# MISC 0xMISC 0.5823 13.4% 334 0.0017 NS 0.0 <47 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x949BAEB72FDE23A2 1.4501 33.3% 16 0.0906 0.00 SELECT stats_cvs_group +# 2 0x8FFEBD609B778EB2 0.5053 11.6% 45 0.0112 0.10 INSERT activity_log +# 3 0x64F8E6F000640AF8 0.3750 8.6% 5 0.0750 0.00 SELECT users +# 4 0x22375E33FDA4E899 0.3705 8.5% 1 0.3705 0.00 SELECT ONLY OF +# 5 0x60D6962E42C08882 0.1020 2.3% 46 0.0022 0.00 SELECT plugins +# 6 0x32AF9886FDBBAE30 0.0981 2.3% 38 0.0026 0.00 SELECT frs_filetype frs_processor frs_file frs_dlstats_filetotal_agg +# 7 0x5E64B4F52EC23D71 0.0936 2.1% 17 0.0055 0.01 SELECT trove_cat trove_group_link +# 8 0x1929E67B76DC55E7 0.0877 2.0% 5 0.0175 0.00 SELECT frs_dlstats_grouptotal_vw groups +# 9 0x1451AE69DBB6E0F2 0.0780 1.8% 1 0.0780 0.00 SELECT users +# 10 0xD7884E7E471BB089 0.0722 1.7% 61 0.0012 0.00 SELECT forum_group_list_vw +# 11 0x9DBDF5FB59454957 0.0612 1.4% 5 0.0122 0.00 SELECT users news_bytes groups +# 12 0x834CC93BAA549DD4 0.0609 1.4% 17 0.0036 0.00 SELECT users user_group +# 13 0xEF691689ACF9DC59 0.0595 1.4% 10 0.0059 0.00 SELECT frs_package frs_release frs_file groups +# 14 0x10D09F1381004A22 0.0582 1.3% 17 0.0034 0.00 SELECT 
groups +# 15 0xCF439D1EC0933550 0.0579 1.3% 2 0.0290 0.04 SELECT pg_catalog.pg_class pg_catalog.pg_namespace +# 16 0x7D752C8A15925978 0.0544 1.2% 60 0.0009 0.00 BEGIN SELECT +# 17 0x82AEF03891943FB3 0.0514 1.2% 2 0.0257 0.03 SELECT forum_group_list_vw +# 18 0x9AA827C1DF73EE43 0.0496 1.1% 17 0.0029 0.00 SELECT users news_bytes groups +# 19 0x4636BFC0875521C9 0.0447 1.0% 40 0.0011 0.00 SELECT supported_languages +# 20 0xB1C777CE6EBFE87E 0.0434 1.0% 17 0.0026 0.00 SELECT frs_package frs_release +# MISC 0xMISC 0.5823 13.4% 334 0.0017 0.0 <47 ITEMS> diff --git a/t/pt-query-digest/samples/pg-syslog-sample1 b/t/pt-query-digest/samples/pg-syslog-sample1 index 8c7f9a5c..ea03c8f7 100644 --- a/t/pt-query-digest/samples/pg-syslog-sample1 +++ b/t/pt-query-digest/samples/pg-syslog-sample1 @@ -1,12 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x43088A2EF12EB3EE 0.1661 38.2% 2 0.0830 1.00 0.11 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace -# 2 0xD6F2B77706BEEB5F 0.0710 16.3% 1 0.0710 1.00 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace -# 3 0x9213FC20E3993331 0.0464 10.7% 1 0.0464 1.00 0.00 SELECT foo -# 4 0x458CB071ADE822AC 0.0446 10.3% 6 0.0074 1.00 0.00 SELECT -# 5 0x60960AADCFD005F3 0.0426 9.8% 1 0.0426 1.00 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace -# 6 0xA99588746B4C6438 0.0283 6.5% 1 0.0283 1.00 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace -# 7 0x32A1860329937485 0.0278 6.4% 1 0.0278 1.00 0.00 SELECT pg_catalog.pg_database pg_catalog.pg_roles -# MISC 0xMISC 0.0083 1.9% 1 0.0083 NS 0.0 <1 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x43088A2EF12EB3EE 0.1661 38.2% 2 0.0830 0.11 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace +# 2 0xD6F2B77706BEEB5F 0.0710 16.3% 1 0.0710 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace +# 3 0x9213FC20E3993331 0.0464 10.7% 1 0.0464 0.00 SELECT foo +# 4 0x458CB071ADE822AC 0.0446 10.3% 6 0.0074 0.00 SELECT +# 5 0x60960AADCFD005F3 0.0426 9.8% 1 0.0426 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace +# 6 0xA99588746B4C6438 0.0283 6.5% 1 0.0283 0.00 SELECT pg_catalog.pg_class pg_catalog.pg_roles pg_catalog.pg_namespace +# 7 0x32A1860329937485 0.0278 6.4% 1 0.0278 0.00 SELECT pg_catalog.pg_database pg_catalog.pg_roles +# MISC 0xMISC 0.0083 1.9% 1 0.0083 0.0 <1 ITEMS> diff --git a/t/pt-query-digest/samples/rawlog001.txt b/t/pt-query-digest/samples/rawlog001.txt index 3f298ac0..b5722bb6 100644 --- a/t/pt-query-digest/samples/rawlog001.txt +++ b/t/pt-query-digest/samples/rawlog001.txt @@ -7,7 +7,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xCB5621E548E5497F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -31,7 +31,7 @@ SELECT c FROM t WHERE id=1\G # Query 2: 0 QPS, 0x concurrency, ID 0x774B2B0B59EBAC2C at byte 27 _______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -54,7 +54,7 @@ SELECT c FROM t WHERE id=1\G /* Hello, world! */ SELECT * FROM t2 LIMIT 1\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========= -# 1 0xCB5621E548E5497F 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT t -# 2 0x774B2B0B59EBAC2C 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT t? +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========= +# 1 0xCB5621E548E5497F 0.0000 0.0% 1 0.0000 0.00 SELECT t +# 2 0x774B2B0B59EBAC2C 0.0000 0.0% 1 0.0000 0.00 SELECT t? diff --git a/t/pt-query-digest/samples/slow001_distillreport.txt b/t/pt-query-digest/samples/slow001_distillreport.txt index 02f2165e..def4a6ea 100644 --- a/t/pt-query-digest/samples/slow001_distillreport.txt +++ b/t/pt-query-digest/samples/slow001_distillreport.txt @@ -5,7 +5,7 @@ # Item 1: 0 QPS, 0x concurrency, ID 0x82E67ABEEDCA3249 at byte 0 _________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median @@ -33,7 +33,7 @@ SELECT n # Item 2: 0 QPS, 0x concurrency, ID 0x7AD070CD3F4121D5 at byte 359 _______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:45:10 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow001_report.txt b/t/pt-query-digest/samples/slow001_report.txt index 32440146..95077c7c 100644 --- a/t/pt-query-digest/samples/slow001_report.txt +++ b/t/pt-query-digest/samples/slow001_report.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 0 ________ -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median @@ -31,7 +31,7 @@ select sleep(2) from n\G # Query 2: 0 QPS, 0x concurrency, ID 0x3A99CC42AEDCCFCD at byte 359 ______ -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:45:10 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow001_select_report.txt b/t/pt-query-digest/samples/slow001_select_report.txt index ae5b49c1..412fd390 100644 --- a/t/pt-query-digest/samples/slow001_select_report.txt +++ b/t/pt-query-digest/samples/slow001_select_report.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -24,7 +24,7 @@ select sleep(2) from n\G # Query 2: 0 QPS, 0x concurrency, ID 0x3A99CC42AEDCCFCD at byte 359 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow001_tablesreport.txt b/t/pt-query-digest/samples/slow001_tablesreport.txt index 7e92c56b..2de5f139 100644 --- a/t/pt-query-digest/samples/slow001_tablesreport.txt +++ b/t/pt-query-digest/samples/slow001_tablesreport.txt @@ -5,7 +5,7 @@ # Item 1: 0.03 QPS, 0.05x concurrency, ID 0x1161D7068EB79526 at byte 0 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-10-15 21:43:52 to 21:45:10 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow002-orderbynonexistent.txt b/t/pt-query-digest/samples/slow002-orderbynonexistent.txt index 96083e5f..f81d43ff 100644 --- a/t/pt-query-digest/samples/slow002-orderbynonexistent.txt +++ b/t/pt-query-digest/samples/slow002-orderbynonexistent.txt @@ -1,7 +1,7 @@ --order-by attribute Rows_read doesn't exist, using Query_time:sum # Query 1: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -43,7 +43,7 @@ select n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 inner join db1.gonzo a using(gonzo) \G # Query 2: 0 QPS, 0x concurrency, ID 0x0FFE94ABA6A2A9E8 at byte 1334 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -86,7 +86,7 @@ WHERE vab3concept1upload='6994465'\G select vab3concept1id = '91848182522' from db4.vab3concept1upload where vab3concept1upload='6994465'\G # Query 3: 0 QPS, 0x concurrency, ID 0xB211BA2B8D6D065C at byte 2393 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -128,7 +128,7 @@ SET biz = '91848182522'\G select biz = '91848182522' from foo.bar \G # Query 4: 0 QPS, 0x concurrency, ID 0x6969975466519B81 at byte 2861 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -171,7 +171,7 @@ WHERE fillze='899'\G select boop='bop: 899' from bizzle.bat where fillze='899'\G # Query 5: 0 QPS, 0x concurrency, ID 0xC22D235B07D1D774 at byte 1864 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -210,7 +210,7 @@ INSERT INTO db1.conch (word3, vid83) VALUES ('211', '18')\G # Query 6: 0 QPS, 0x concurrency, ID 0x7546F89214254F2F at byte 815 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -249,7 +249,7 @@ INSERT INTO db3.vendor11gonzo (makef, bizzle) VALUES ('', 'Exact')\G # Query 7: 0 QPS, 0x 
concurrency, ID 0x85FFF5AA78E5FF6A at byte 0 ________ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow002_iters_2.txt b/t/pt-query-digest/samples/slow002_iters_2.txt index 978f6d2d..178b2ea8 100644 --- a/t/pt-query-digest/samples/slow002_iters_2.txt +++ b/t/pt-query-digest/samples/slow002_iters_2.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -43,7 +43,7 @@ select n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 inner join db1.gonzo a using(gonzo) \G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x66825DDC008FFA89 0.7261 95.3% 1 0.7261 1.00 0.00 UPDATE db?.tuningdetail_?_? db?.gonzo -# MISC 0xMISC 0.0360 4.7% 7 0.0051 NS 0.0 <6 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x66825DDC008FFA89 0.7261 95.3% 1 0.7261 0.00 UPDATE db?.tuningdetail_?_? db?.gonzo +# MISC 0xMISC 0.0360 4.7% 7 0.0051 0.0 <6 ITEMS> diff --git a/t/pt-query-digest/samples/slow002_orderbyreport.txt b/t/pt-query-digest/samples/slow002_orderbyreport.txt index 1c2c67fe..3b09e861 100644 --- a/t/pt-query-digest/samples/slow002_orderbyreport.txt +++ b/t/pt-query-digest/samples/slow002_orderbyreport.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xB211BA2B8D6D065C at byte 3374 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -44,7 +44,7 @@ select biz = '91848182522' from foo.bar \G # Query 2: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow002_report.txt b/t/pt-query-digest/samples/slow002_report.txt index 5482aa14..f383847f 100644 --- a/t/pt-query-digest/samples/slow002_report.txt +++ b/t/pt-query-digest/samples/slow002_report.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -42,7 +42,7 @@ select n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 inner join db1.gonzo a using(gonzo) \G # Query 2: 0 QPS, 0x concurrency, ID 0x0FFE94ABA6A2A9E8 at byte 1334 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -85,7 +85,7 @@ WHERE vab3concept1upload='6994465'\G select vab3concept1id = '91848182522' from db4.vab3concept1upload where vab3concept1upload='6994465'\G # Query 3: 0 QPS, 0x concurrency, ID 0xB211BA2B8D6D065C at byte 3374 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -127,7 +127,7 @@ SET biz = '91848182522'\G select biz = '91848182522' from foo.bar \G # Query 4: 0 QPS, 0x concurrency, ID 0x6969975466519B81 at byte 2861 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -170,7 +170,7 @@ WHERE fillze='899'\G select boop='bop: 899' from bizzle.bat where fillze='899'\G # Query 5: 0 QPS, 0x concurrency, ID 0xC22D235B07D1D774 at byte 1864 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -209,7 +209,7 @@ INSERT INTO db1.conch (word3, vid83) VALUES ('211', '18')\G # Query 6: 0 QPS, 0x concurrency, ID 0x7546F89214254F2F at byte 815 ______ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -248,7 +248,7 @@ INSERT INTO db3.vendor11gonzo (makef, bizzle) VALUES ('', 'Exact')\G # Query 7: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 0 ________ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow002_report_filtered.txt b/t/pt-query-digest/samples/slow002_report_filtered.txt index e9d7411c..10056cbf 100644 --- a/t/pt-query-digest/samples/slow002_report_filtered.txt +++ b/t/pt-query-digest/samples/slow002_report_filtered.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x6969975466519B81 at byte 2861 _____ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow003_report.txt b/t/pt-query-digest/samples/slow003_report.txt index df294d96..faeee997 100644 --- a/t/pt-query-digest/samples/slow003_report.txt +++ b/t/pt-query-digest/samples/slow003_report.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow004_report.txt b/t/pt-query-digest/samples/slow004_report.txt index c9af29c6..7f4b2496 100644 --- a/t/pt-query-digest/samples/slow004_report.txt +++ b/t/pt-query-digest/samples/slow004_report.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xB16C9E5B3D9C484F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow006-order-by-re.txt b/t/pt-query-digest/samples/slow006-order-by-re.txt index 7df9b322..51ffe8b9 100644 --- a/t/pt-query-digest/samples/slow006-order-by-re.txt +++ b/t/pt-query-digest/samples/slow006-order-by-re.txt @@ -1,7 +1,7 @@ # Query 1: 0.05 QPS, 0x concurrency, ID 0xA20C29AF174CE545 at byte 1833 __ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median @@ -34,7 +34,7 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0x concurrency, ID 0xD4CD74934382A184 at byte 1469 __ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow006_AR_1.txt b/t/pt-query-digest/samples/slow006_AR_1.txt index dad9c923..adb32dd6 100644 --- a/t/pt-query-digest/samples/slow006_AR_1.txt +++ b/t/pt-query-digest/samples/slow006_AR_1.txt @@ -1,7 +1,7 @@ # Query 1: 0.05 QPS, 0.00x concurrency, ID 0xA20C29AF174CE545 at byte 1833 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median @@ -40,7 +40,7 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow006_AR_2.txt b/t/pt-query-digest/samples/slow006_AR_2.txt index c4338fc7..eab578a5 100644 --- a/t/pt-query-digest/samples/slow006_AR_2.txt +++ b/t/pt-query-digest/samples/slow006_AR_2.txt @@ -2,7 +2,7 @@ # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow006_AR_4.txt b/t/pt-query-digest/samples/slow006_AR_4.txt index 22372b61..aa7882c6 100644 --- a/t/pt-query-digest/samples/slow006_AR_4.txt +++ b/t/pt-query-digest/samples/slow006_AR_4.txt @@ -1,7 +1,7 @@ # Query 1: 0.05 QPS, 0.00x concurrency, ID 0xA20C29AF174CE545 at byte 1833 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median @@ -40,7 +40,7 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow006_AR_5.txt b/t/pt-query-digest/samples/slow006_AR_5.txt index 4705e25c..3d6b8ac2 100644 --- a/t/pt-query-digest/samples/slow006_AR_5.txt +++ b/t/pt-query-digest/samples/slow006_AR_5.txt @@ -2,7 +2,7 @@ # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow006_report.txt b/t/pt-query-digest/samples/slow006_report.txt index 76359986..166ac17c 100644 --- a/t/pt-query-digest/samples/slow006_report.txt +++ b/t/pt-query-digest/samples/slow006_report.txt @@ -1,7 +1,7 @@ # Query 1: 0.05 QPS, 0.00x concurrency, ID 0xA20C29AF174CE545 at byte 1833 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median @@ -34,7 +34,7 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow007_explain_1-55.txt b/t/pt-query-digest/samples/slow007_explain_1-55.txt index 267ec4c8..b17c8065 100644 --- a/t/pt-query-digest/samples/slow007_explain_1-55.txt +++ b/t/pt-query-digest/samples/slow007_explain_1-55.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # EXPLAIN sparkline: I # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 diff --git a/t/pt-query-digest/samples/slow007_explain_2-51.txt b/t/pt-query-digest/samples/slow007_explain_2-51.txt index 4f89e6a1..61fd31c7 100644 --- a/t/pt-query-digest/samples/slow007_explain_2-51.txt +++ b/t/pt-query-digest/samples/slow007_explain_2-51.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # EXPLAIN sparkline: I # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 diff --git a/t/pt-query-digest/samples/slow007_explain_3.txt b/t/pt-query-digest/samples/slow007_explain_3.txt index 424b6b5f..534706ae 100644 --- a/t/pt-query-digest/samples/slow007_explain_3.txt +++ b/t/pt-query-digest/samples/slow007_explain_3.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -34,6 +34,6 @@ SELECT fruit FROM trees\G # EXPLAIN failed: DBD::mysql::st execute failed: Table 'food.trees' doesn't exist [for Statement "EXPLAIN /*!50100 PARTITIONS */ SELECT fruit FROM trees"] at line ?. 
 # Profile
-# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item
-# ==== ================== ============= ===== ====== ==== ===== ========= ========
-# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT trees
+# Rank Query ID Response time Calls R/Call V/M EXPLAIN Item
+# ==== ================== ============= ===== ====== ===== ========== ============
+# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 0.00 SELECT trees
diff --git a/t/pt-query-digest/samples/slow007_explain_4.txt b/t/pt-query-digest/samples/slow007_explain_4.txt
index e4fe95e8..f7fff1f3 100644
--- a/t/pt-query-digest/samples/slow007_explain_4.txt
+++ b/t/pt-query-digest/samples/slow007_explain_4.txt
@@ -1,5 +1,5 @@
 # Profile
-# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item
-# ==== ================== ============= ===== ====== ==== ===== ======= ==========
-# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 1.00 0.00 I SELECT trees
+# Rank Query ID Response time Calls R/Call V/M EXPLAIN Item
+# ==== ================== ============= ===== ====== ===== ======= ============
+# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 0.00 I SELECT trees
diff --git a/t/pt-query-digest/samples/slow008_report.txt b/t/pt-query-digest/samples/slow008_report.txt
index e91388f9..9cc5ccf3 100644
--- a/t/pt-query-digest/samples/slow008_report.txt
+++ b/t/pt-query-digest/samples/slow008_report.txt
@@ -1,7 +1,7 @@
 # Query 1: 0 QPS, 0x concurrency, ID 0xC72BF45D68E35A6E at byte 435 ______
 # This item is included in the report because it matches --limit.
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00
+# Scores: V/M = 0.00
 # Query_time sparkline: | ^ |
 # Attribute pct total min max avg 95% stddev median
 # ============ === ======= ======= ======= ======= ======= ======= =======
@@ -32,7 +32,7 @@ SELECT MIN(id),MAX(id) FROM tbl\G
 # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 221 ______
 # This item is included in the report because it matches --limit.
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00
+# Scores: V/M = 0.00
 # Query_time sparkline: | ^ |
 # Attribute pct total min max avg 95% stddev median
 # ============ === ======= ======= ======= ======= ======= ======= =======
@@ -59,7 +59,7 @@ SET NAMES utf8\G
 # Query 3: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 0 ________
 # This item is included in the report because it matches --limit.
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00
+# Scores: V/M = 0.00
 # Query_time sparkline: |^ |
 # Attribute pct total min max avg 95% stddev median
 # ============ === ======= ======= ======= ======= ======= ======= =======
diff --git a/t/pt-query-digest/samples/slow010_reportbyfile.txt b/t/pt-query-digest/samples/slow010_reportbyfile.txt
index 2e2d4215..07278f86 100644
--- a/t/pt-query-digest/samples/slow010_reportbyfile.txt
+++ b/t/pt-query-digest/samples/slow010_reportbyfile.txt
@@ -5,7 +5,7 @@
 # Item 1: 0 QPS, 0x concurrency, ID 0xE0976A52E15A18AC at byte 0 _________
 # This item is included in the report because it matches --limit.
-# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow011_report.txt b/t/pt-query-digest/samples/slow011_report.txt index 577ee8b7..75b8cad7 100644 --- a/t/pt-query-digest/samples/slow011_report.txt +++ b/t/pt-query-digest/samples/slow011_report.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 435 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.02 +# Scores: V/M = 0.02 # Query_time sparkline: |^ ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -28,7 +28,7 @@ administrator command: Quit\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 663 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow013_report.txt b/t/pt-query-digest/samples/slow013_report.txt index 48daf735..dc13096b 100644 --- a/t/pt-query-digest/samples/slow013_report.txt +++ b/t/pt-query-digest/samples/slow013_report.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x31DA25F95494CA95 at byte 174 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:20 # Attribute pct total min max avg 95% stddev median @@ -29,7 +29,7 @@ SHOW STATUS\G # Query 2: 0 QPS, 0x concurrency, ID 0x3AEAAD0E15D725B5 at byte 600 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median @@ -57,7 +57,7 @@ SET autocommit=0\G # Query 3: 0 QPS, 0x concurrency, ID 0x813031B8BBC3B329 at byte 782 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median @@ -85,7 +85,7 @@ commit\G # Query 4: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 385 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: |^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt b/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt index 01749d56..943f468c 100644 --- a/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt +++ b/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt @@ -15,7 +15,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x31DA25F95494CA95 at byte 174 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:20 # Attribute pct total min max avg 95% stddev median @@ -47,7 +47,7 @@ SHOW STATUS\G # Item 1: 2 QPS, 0.15x concurrency, ID 0x4F1658C9B243995F at byte 174 ____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.15 +# Scores: V/M = 0.15 # Query_time sparkline: |^ ^ | # Time range: 2008-11-27 08:51:20 to 08:51:21 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow013_report_limit.txt b/t/pt-query-digest/samples/slow013_report_limit.txt index 62f12054..d08830a8 100644 --- a/t/pt-query-digest/samples/slow013_report_limit.txt +++ b/t/pt-query-digest/samples/slow013_report_limit.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x31DA25F95494CA95 at byte 174 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:20 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow013_report_outliers.txt b/t/pt-query-digest/samples/slow013_report_outliers.txt index 0096c271..2e7cfa9b 100644 --- a/t/pt-query-digest/samples/slow013_report_outliers.txt +++ b/t/pt-query-digest/samples/slow013_report_outliers.txt @@ -5,7 +5,7 @@ # Item 1: 2 QPS, 0.15x concurrency, ID 0x4F1658C9B243995F at byte 174 ____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.15 +# Scores: V/M = 0.15 # Query_time sparkline: |^ ^ | # Time range: 2008-11-27 08:51:20 to 08:51:21 # Attribute pct total min max avg 95% stddev median @@ -32,7 +32,7 @@ mytopuser # Item 2: 0 QPS, 0x concurrency, ID 0x8F4C76E92F07EABE at byte 600 _______ # This item is included in the report because it matches --outliers. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow013_report_profile.txt b/t/pt-query-digest/samples/slow013_report_profile.txt index 7ace77bd..99245089 100644 --- a/t/pt-query-digest/samples/slow013_report_profile.txt +++ b/t/pt-query-digest/samples/slow013_report_profile.txt @@ -1,8 +1,8 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x31DA25F95494CA95 0.1494 99.9% 1 0.1494 1.00 0.00 SHOW STATUS -# 2 0x3AEAAD0E15D725B5 0.0001 0.1% 2 0.0000 1.00 0.00 SET -# 3 0x813031B8BBC3B329 0.0000 0.0% 1 0.0000 1.00 0.00 COMMIT -# MISC 0xMISC 0.0000 0.0% 1 0.0000 NS 0.0 <1 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =========== +# 1 0x31DA25F95494CA95 0.1494 99.9% 1 0.1494 0.00 SHOW STATUS +# 2 0x3AEAAD0E15D725B5 0.0001 0.1% 2 0.0000 0.00 SET +# 3 0x813031B8BBC3B329 0.0000 0.0% 1 0.0000 0.00 COMMIT +# MISC 0xMISC 0.0000 0.0% 1 0.0000 0.0 <1 ITEMS> diff --git a/t/pt-query-digest/samples/slow013_report_user.txt b/t/pt-query-digest/samples/slow013_report_user.txt index 49f20069..0a350d53 100644 --- a/t/pt-query-digest/samples/slow013_report_user.txt +++ b/t/pt-query-digest/samples/slow013_report_user.txt @@ -5,7 +5,7 @@ # Item 1: 2 QPS, 0.15x concurrency, ID 0x4F1658C9B243995F at byte 174 ____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.15 +# Scores: V/M = 0.15 # Query_time sparkline: |^ ^ | # Time range: 2008-11-27 08:51:20 to 08:51:21 # Attribute pct total min max avg 95% stddev median @@ -32,7 +32,7 @@ mytopuser # Item 2: 0 QPS, 0x concurrency, ID 0x8F4C76E92F07EABE at byte 600 _______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow014_report.txt b/t/pt-query-digest/samples/slow014_report.txt index 0931345b..0cdadc56 100644 --- a/t/pt-query-digest/samples/slow014_report.txt +++ b/t/pt-query-digest/samples/slow014_report.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 1313 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow018_report.txt b/t/pt-query-digest/samples/slow018_report.txt index 61fc6497..a3642b3b 100644 --- a/t/pt-query-digest/samples/slow018_report.txt +++ b/t/pt-query-digest/samples/slow018_report.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x6083030C4A5D8996 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow019_report.txt b/t/pt-query-digest/samples/slow019_report.txt index ba1fbb6c..0d04dbdb 100644 --- a/t/pt-query-digest/samples/slow019_report.txt +++ b/t/pt-query-digest/samples/slow019_report.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 435 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.02 +# Scores: V/M = 0.02 # Query_time sparkline: |^ ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -28,7 +28,7 @@ administrator command: Quit\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 221 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow019_report_noza.txt b/t/pt-query-digest/samples/slow019_report_noza.txt index 634bc105..0e01d7e0 100644 --- a/t/pt-query-digest/samples/slow019_report_noza.txt +++ b/t/pt-query-digest/samples/slow019_report_noza.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 435 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.02 +# Scores: V/M = 0.02 # Query_time sparkline: |^ ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -28,7 +28,7 @@ administrator command: Quit\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 221 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow023.txt b/t/pt-query-digest/samples/slow023.txt index 34b1e13a..4760f68f 100644 --- a/t/pt-query-digest/samples/slow023.txt +++ b/t/pt-query-digest/samples/slow023.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E38374648788E52 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow024.txt b/t/pt-query-digest/samples/slow024.txt index 1f97fb73..0dd9e86c 100644 --- a/t/pt-query-digest/samples/slow024.txt +++ b/t/pt-query-digest/samples/slow024.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x93E5C17055D970BE at byte 514419 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median @@ -31,7 +31,7 @@ INSERT INTO `film_actor` VALUES (1,1,'2006-02-15 10:05:03') /*... 
omitted ...*/O # Query 2: 0 QPS, 0x concurrency, ID 0xA1C3EE4F5996E672 at byte 342942 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median @@ -61,7 +61,7 @@ INSERT IGNORE INTO `film_actor` VALUES (1,1,'2006-02-15 10:05:03') /*... omitted # Query 3: 0 QPS, 0x concurrency, ID 0xA2C576176F348267 at byte 171471 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow028.txt b/t/pt-query-digest/samples/slow028.txt index bc8735e9..99ea2ce2 100644 --- a/t/pt-query-digest/samples/slow028.txt +++ b/t/pt-query-digest/samples/slow028.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x182FF6A853858893 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow032.txt b/t/pt-query-digest/samples/slow032.txt index e9eb9152..d3000c2a 100644 --- a/t/pt-query-digest/samples/slow032.txt +++ b/t/pt-query-digest/samples/slow032.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7546F89214254F2F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-precise-since-until.txt b/t/pt-query-digest/samples/slow033-precise-since-until.txt index 379a99d1..85e224d7 100644 --- a/t/pt-query-digest/samples/slow033-precise-since-until.txt +++ b/t/pt-query-digest/samples/slow033-precise-since-until.txt @@ -1,7 +1,7 @@ # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median @@ -33,7 +33,7 @@ SELECT * FROM bar\G # Query 2: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow033-rtm-event-1h.txt b/t/pt-query-digest/samples/slow033-rtm-event-1h.txt index 6fd36a99..8ea873cc 100644 --- a/t/pt-query-digest/samples/slow033-rtm-event-1h.txt +++ b/t/pt-query-digest/samples/slow033-rtm-event-1h.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median @@ -32,6 +32,6 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-rtm-event-25h.txt b/t/pt-query-digest/samples/slow033-rtm-event-25h.txt index 5b74d609..51e164f2 100644 --- a/t/pt-query-digest/samples/slow033-rtm-event-25h.txt +++ b/t/pt-query-digest/samples/slow033-rtm-event-25h.txt @@ -1,7 +1,7 @@ # Query 1: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-25 11:19:27 to 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median @@ -32,6 +32,6 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 2 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 2 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt b/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt index 5f652dba..918f7d0e 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median @@ -32,13 +32,13 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median @@ -69,13 +69,13 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median @@ -107,7 +107,7 @@ SELECT * FROM bar\G # Query 2: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 683 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:30:00 # Attribute pct total min max avg 95% stddev median @@ -138,14 +138,14 @@ SELECT * FROM bar\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 66.7% 2 0.0000 1.00 0.00 SELECT bar -# 2 0xAC1BF726F2AB10C5 0.0000 33.3% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 66.7% 2 0.0000 0.00 SELECT bar +# 2 0xAC1BF726F2AB10C5 0.0000 33.3% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median @@ -176,6 +176,6 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt b/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt index 3243cf0e..6b5f5ca5 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median @@ -32,13 +32,13 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median @@ -69,13 +69,13 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median @@ -106,13 +106,13 @@ SELECT * FROM foo\G SELECT * FROM bar\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 2 0.0000 1.00 0.00 SELECT bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 2 0.0000 0.00 SELECT bar # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 683 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:30:00 # Attribute pct total min max avg 95% stddev median @@ -143,13 +143,13 @@ SELECT * FROM bar\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median @@ -180,6 +180,6 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt b/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt index 7d406587..d883cd9a 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median @@ -32,13 +32,13 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median @@ -69,13 +69,13 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 344 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:19:30 # Attribute pct total min max avg 95% stddev median @@ -106,6 +106,6 @@ SELECT * FROM foo\G SELECT * FROM bar\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 0.00 SELECT bar diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt b/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt index 9a101610..d05a1b4a 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median @@ -32,13 +32,13 @@ SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median @@ -69,13 +69,13 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 344 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:19:30 # Attribute pct total min max avg 95% stddev median @@ -106,13 +106,13 @@ SELECT * FROM foo\G SELECT * FROM bar\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 0.00 SELECT bar # Query 1: 0 QPS, 0x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:19:31 # Attribute pct total min max avg 95% stddev median @@ -143,13 +143,13 @@ SELECT * FROM bar\G SELECT * FROM bar\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x07AEF8EFAB3FA3CE 0.0000 100.0% 1 0.0000 0.00 SELECT bar # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 683 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:30:00 # Attribute pct total min max avg 95% stddev median @@ -180,13 +180,13 @@ SELECT * FROM bar\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median @@ -217,6 +217,6 @@ SELECT * FROM foo\G SELECT * FROM foo\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 1.00 0.00 SELECT foo +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAC1BF726F2AB10C5 0.0000 100.0% 1 0.0000 0.00 SELECT foo diff --git a/t/pt-query-digest/samples/slow033-since-Nd.txt b/t/pt-query-digest/samples/slow033-since-Nd.txt index d196607b..bbe54d68 100644 --- a/t/pt-query-digest/samples/slow033-since-Nd.txt +++ b/t/pt-query-digest/samples/slow033-since-Nd.txt @@ -1,7 +1,7 @@ # Query 1: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-25 11:19:27 to 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median @@ -33,7 +33,7 @@ SELECT * FROM foo\G # Query 2: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow033-since-yymmdd.txt b/t/pt-query-digest/samples/slow033-since-yymmdd.txt index bb2c52e7..17b22589 100644 --- a/t/pt-query-digest/samples/slow033-since-yymmdd.txt +++ b/t/pt-query-digest/samples/slow033-since-yymmdd.txt @@ -1,7 +1,7 @@ # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median @@ -33,7 +33,7 @@ SELECT * FROM bar\G # Query 2: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-27 11:30:00 to 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt b/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt index 169a3421..73ce9512 100644 --- a/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt +++ b/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow033-until-date.txt b/t/pt-query-digest/samples/slow033-until-date.txt index cd331351..12f34ffa 100644 --- a/t/pt-query-digest/samples/slow033-until-date.txt +++ b/t/pt-query-digest/samples/slow033-until-date.txt @@ -1,7 +1,7 @@ # Query 1: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-07-25 11:19:27 to 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt index f108bdec..2a8495c8 100644 --- a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt +++ b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xABE9508269335CD1 at byte 1866 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Lock_time sparkline: | ^| # Time range: all events occurred at 2009-08-05 13:00:27 # Attribute pct total min max avg 95% stddev median @@ -38,7 +38,7 @@ DELETE FROM forest WHERE animal = 'dead'\G select * from forest WHERE animal = 'dead'\G # Query 2: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 934 -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.03 +# Scores: V/M = 0.03 # Lock_time sparkline: | _^ | # Time range: 2009-08-05 11:00:27 to 13:00:27 # Attribute pct total min max avg 95% stddev median @@ -71,7 +71,7 @@ select * from forest WHERE animal = 'dead'\G SELECT * FROM foo\G # Query 3: 0 QPS, 0x concurrency, ID 0xB79802214165F670 at byte 1267 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.73 +# Scores: V/M = 0.73 # Lock_time sparkline: | ^^ | # Time range: all events occurred at 2009-08-05 12:00:27 # Attribute pct total min max avg 95% stddev median @@ -102,7 +102,7 @@ SELECT * FROM foo\G INSERT INTO tbl VALUES ('a', 'b')\G # Query 4: 0 QPS, 0x concurrency, ID 0x1F9B2F47A843D460 at byte 333 ______ -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Lock_time sparkline: | ^ | # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median @@ -132,7 +132,7 @@ INSERT INTO tbl VALUES ('a', 'b')\G SELECT id FROM tbl WHERE id = 1\G # Query 5: 0 QPS, 0x concurrency, ID 0x3F1024B96D9D469E at byte 625 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Lock_time sparkline: |^ | # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median @@ -166,10 +166,10 @@ SELECT id FROM tbl WHERE id = 1\G SELECT COUNT(*) FROM blah WHERE col > 2\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== =============== ===== ========= ==== ===== ===== -# 1 0xABE9508269335CD1 1349.0001 98.9% 1 1349.0001 0.00 0.00 DELETE forest -# 2 0xAC1BF726F2AB10C5 2.9042 0.2% 4 0.7261 1.00 0.03 SELECT foo -# 3 0xB79802214165F670 0.7261 0.1% 2 0.3631 1.00 0.73 INSERT tbl -# 4 0x1F9B2F47A843D460 1.7261 0.1% 1 1.7261 0.50 0.00 SELECT tbl -# 5 0x3F1024B96D9D469E 9.0001 0.7% 1 9.0001 0.00 0.00 SELECT blah +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== =============== ===== ========= ===== ========== +# 1 0xABE9508269335CD1 1349.0001 98.9% 1 1349.0001 0.00 DELETE forest +# 2 0xAC1BF726F2AB10C5 2.9042 0.2% 4 0.7261 0.03 SELECT foo +# 3 0xB79802214165F670 0.7261 0.1% 2 0.3631 0.73 INSERT tbl +# 4 0x1F9B2F47A843D460 1.7261 0.1% 1 1.7261 0.00 SELECT tbl +# 5 0x3F1024B96D9D469E 9.0001 0.7% 1 9.0001 0.00 SELECT blah diff --git a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt index 39c0d365..be7bdd41 100644 --- 
a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt +++ b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt @@ -1,6 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xABE9508269335CD1 at byte 1866 _____ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: all events occurred at 2009-08-05 13:00:27 # Attribute pct total min max avg 95% stddev median @@ -38,7 +38,7 @@ DELETE FROM forest WHERE animal = 'dead'\G select * from forest WHERE animal = 'dead'\G # Query 2: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 934 -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.03 +# Scores: V/M = 0.03 # Query_time sparkline: | ^ | # Time range: 2009-08-05 11:00:27 to 13:00:27 # Attribute pct total min max avg 95% stddev median @@ -71,7 +71,7 @@ select * from forest WHERE animal = 'dead'\G SELECT * FROM foo\G # Query 3: 0 QPS, 0x concurrency, ID 0xB79802214165F670 at byte 1267 _____ -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.73 +# Scores: V/M = 0.73 # Query_time sparkline: | ^ ^ | # Time range: all events occurred at 2009-08-05 12:00:27 # Attribute pct total min max avg 95% stddev median @@ -102,7 +102,7 @@ SELECT * FROM foo\G INSERT INTO tbl VALUES ('a', 'b')\G # Query 4: 0 QPS, 0x concurrency, ID 0x1F9B2F47A843D460 at byte 333 ______ -# Scores: Apdex = 0.50 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median @@ -132,7 +132,7 @@ INSERT INTO tbl VALUES ('a', 'b')\G SELECT id FROM tbl WHERE id = 1\G # Query 5: 0 QPS, 0x concurrency, ID 0x3F1024B96D9D469E at byte 625 ______ -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median @@ -166,10 +166,10 @@ SELECT id FROM tbl WHERE id = 1\G SELECT COUNT(*) FROM blah WHERE col > 2\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== =============== ===== ========= ==== ===== ===== -# 1 0xABE9508269335CD1 1349.0001 98.9% 1 1349.0001 0.00 0.00 DELETE forest -# 2 0xAC1BF726F2AB10C5 2.9042 0.2% 4 0.7261 1.00 0.03 SELECT foo -# 3 0xB79802214165F670 0.7261 0.1% 2 0.3631 1.00 0.73 INSERT tbl -# 4 0x1F9B2F47A843D460 1.7261 0.1% 1 1.7261 0.50 0.00 SELECT tbl -# 5 0x3F1024B96D9D469E 9.0001 0.7% 1 9.0001 0.00 0.00 SELECT blah +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== =============== ===== ========= ===== ========== +# 1 0xABE9508269335CD1 1349.0001 98.9% 1 1349.0001 0.00 DELETE forest +# 2 0xAC1BF726F2AB10C5 2.9042 0.2% 4 0.7261 0.03 SELECT foo +# 3 0xB79802214165F670 0.7261 0.1% 2 0.3631 0.73 INSERT tbl +# 4 0x1F9B2F47A843D460 1.7261 0.1% 1 1.7261 0.00 SELECT tbl +# 5 0x3F1024B96D9D469E 9.0001 0.7% 1 9.0001 0.00 SELECT blah diff --git a/t/pt-query-digest/samples/slow035.txt b/t/pt-query-digest/samples/slow035.txt index e1262170..a554cc12 100644 --- a/t/pt-query-digest/samples/slow035.txt +++ b/t/pt-query-digest/samples/slow035.txt @@ -19,7 +19,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x727841EC88423713 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -57,7 +57,7 @@ INSERT INTO db.v (m, b) VALUES ('', 'Exact')\G # Query 2: 0 QPS, 0x concurrency, ID 0x9E892D4B16D7BFC2 at byte 525 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -88,7 +88,7 @@ INSERT INTO db.v (m, b) VALUES ('', 'Exact')\G SELECT * FROM blah WHERE something = 'important'\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x727841EC88423713 0.0000 0.0% 1 0.0000 1.00 0.00 INSERT db.v -# 2 0x9E892D4B16D7BFC2 0.0000 0.0% 1 0.0000 1.00 0.00 SELECT blah +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =========== +# 1 0x727841EC88423713 0.0000 0.0% 1 0.0000 0.00 INSERT db.v +# 2 0x9E892D4B16D7BFC2 0.0000 0.0% 1 0.0000 0.00 SELECT blah diff --git a/t/pt-query-digest/samples/slow037_report.txt b/t/pt-query-digest/samples/slow037_report.txt index ef75bee9..7b9de8ef 100644 --- a/t/pt-query-digest/samples/slow037_report.txt +++ b/t/pt-query-digest/samples/slow037_report.txt @@ -5,7 +5,7 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xABCC9DEC8C43EEDC at byte 0 _________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median @@ -28,6 +28,6 @@ LOCK foo bar # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ======== ============= ===== ====== ==== ===== ============ -# 1 0x 0.0010 100.0% 1 0.0010 1.00 0.00 LOCK foo bar +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ======== ============= ===== ====== ===== ============ +# 1 0x 0.0010 100.0% 1 0.0010 0.00 LOCK foo bar diff --git a/t/pt-query-digest/samples/slow042-show-all-host.txt b/t/pt-query-digest/samples/slow042-show-all-host.txt index 3fcf79c3..e43cfb4a 100644 --- a/t/pt-query-digest/samples/slow042-show-all-host.txt +++ b/t/pt-query-digest/samples/slow042-show-all-host.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7CE9953EA3A36141 at byte 417 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-05 19:55:11 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow042.txt b/t/pt-query-digest/samples/slow042.txt index 89616f51..78cc99cb 100644 --- a/t/pt-query-digest/samples/slow042.txt +++ b/t/pt-query-digest/samples/slow042.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7CE9953EA3A36141 at byte 417 ______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-05 19:55:11 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow048.txt b/t/pt-query-digest/samples/slow048.txt index 3213fa47..9232d771 100644 --- a/t/pt-query-digest/samples/slow048.txt +++ b/t/pt-query-digest/samples/slow048.txt @@ -1,7 +1,7 @@ # Query 1: 1.33 QPS, 0.00x concurrency, ID 0x208AC308FD716D83 at byte 454 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2010-06-24 11:48:27 to 11:48:30 # Attribute pct total min max avg 95% stddev median @@ -28,6 +28,6 @@ SELECT * FROM `products` ORDER BY name, shape asc\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x208AC308FD716D83 0.0001 100.0% 4 0.0000 1.00 0.00 SELECT products +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== =============== +# 1 0x208AC308FD716D83 0.0001 100.0% 4 0.0000 0.00 SELECT products diff --git a/t/pt-query-digest/samples/slow049.txt b/t/pt-query-digest/samples/slow049.txt index e469e0ac..c9649a8e 100644 --- a/t/pt-query-digest/samples/slow049.txt +++ b/t/pt-query-digest/samples/slow049.txt @@ -10,16 +10,16 @@ # Query size 308 30 34 30.80 31.70 1.64 28.75 # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== =============== ===== ======== ==== ===== ====== -# 1 0x95AADD230F4EB56A 1000.0000 53.8% 2 500.0000 0.00 0.00 SELECT two -# 2 0x5081E1858C60FD05 500.0000 26.9% 1 500.0000 0.00 0.00 SELECT three -# 4 0x70E215C4BFED0080 50.0000 2.7% 5 10.0000 0.00 0.00 SELECT one -# MISC 0xMISC 310.0000 16.7% 2 155.0000 NS 0.0 <2 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== =============== ===== ======== ===== =========== +# 1 0x95AADD230F4EB56A 1000.0000 53.8% 2 500.0000 0.00 SELECT two +# 2 0x5081E1858C60FD05 500.0000 26.9% 1 500.0000 0.00 SELECT three +# 4 0x70E215C4BFED0080 50.0000 2.7% 5 10.0000 0.00 SELECT one +# MISC 0xMISC 310.0000 16.7% 2 155.0000 0.0 <2 ITEMS> # Query 1: 2 QPS, 1.00kx concurrency, ID 0x95AADD230F4EB56A at byte 886 __ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: 2010-06-24 11:48:34 to 11:48:35 # Attribute pct total min max avg 95% stddev median @@ -47,7 +47,7 @@ SELECT two FROM two WHERE id=?\G # Query 2: 0 QPS, 0x concurrency, ID 0x5081E1858C60FD05 at byte 1013 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: all events occurred at 2010-06-24 11:48:35 # Attribute pct total min max avg 95% stddev median @@ -75,7 +75,7 @@ SELECT three FROM three WHERE id=?\G # Query 4: 1.25 QPS, 12.50x concurrency, ID 0x70E215C4BFED0080 at byte 633 # This item is included in the report because it matches --outliers. 
-# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: 2010-06-24 11:48:21 to 11:48:25 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow050.txt b/t/pt-query-digest/samples/slow050.txt index 052c252b..a1c87e49 100644 --- a/t/pt-query-digest/samples/slow050.txt +++ b/t/pt-query-digest/samples/slow050.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x305E73C51188758F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^| # Time range: all events occurred at 2010-06-24 11:48:00 # Attribute pct total min max avg 95% stddev median @@ -29,6 +29,6 @@ UPDATE mybbl_MBMessage SET groupId = (select groupId from Group_ where name = 'Guest')\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============== ===== ======= ==== ===== ======== -# 1 0x305E73C51188758F 10.0000 100.0% 1 10.0000 0.00 0.00 UPDATE SELECT mybbl_MBMessage Group_ +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============== ===== ======= ===== ============= +# 1 0x305E73C51188758F 10.0000 100.0% 1 10.0000 0.00 UPDATE SELECT mybbl_MBMessage Group_ diff --git a/t/pt-query-digest/samples/slow051.txt b/t/pt-query-digest/samples/slow051.txt index be391ce9..11a68c8f 100644 --- a/t/pt-query-digest/samples/slow051.txt +++ b/t/pt-query-digest/samples/slow051.txt @@ -1,7 +1,7 @@ # Query 1: 0.20 QPS, 0.00x concurrency, ID 0xD989521B246E945B at byte 146 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:48:37 # Attribute pct total min max avg 95% stddev median @@ -27,6 +27,6 @@ LOAD DATA INFILE '/tmp/bar.txt' INTO db.tbl\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ====== -# 1 0xD989521B246E945B 0.0000 100.0% 2 0.0000 1.00 0.00 db.tbl +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ====== +# 1 0xD989521B246E945B 0.0000 100.0% 2 0.0000 0.00 db.tbl diff --git a/t/pt-query-digest/samples/slow052-apdex-t-0.1.txt b/t/pt-query-digest/samples/slow052-apdex-t-0.1.txt deleted file mode 100644 index da8b2774..00000000 --- a/t/pt-query-digest/samples/slow052-apdex-t-0.1.txt +++ /dev/null @@ -1,66 +0,0 @@ - -# Query 1: 0 QPS, 0x concurrency, ID 0x32B0659E6D13E5A2 at byte 16849 ____ -# This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.00 [0.1], V/M = 0.48 -# Query_time sparkline: | ^ | -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 50 100 -# Exec time 74 308s 1s 5s 3s 5s 1s 3s -# Lock time 0 0 0 0 0 0 0 0 -# Rows sent 50 100 1 1 1 1 0 1 -# Rows examine 0 0 0 0 0 0 0 0 -# Query size 51 4.59k 47 47 47 47 0 47 -# String: -# Hosts localhost -# Users root -# Query_time distribution -# 1us -# 10us -# 100us -# 1ms -# 10ms -# 100ms -# 1s ################################################################ -# 10s+ -# Tables -# SHOW TABLE STATUS LIKE 'unsteady_table'\G -# SHOW CREATE TABLE `unsteady_table`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select very_variable_column from unsteady_table\G - -# Query 2: 0 QPS, 0x concurrency, ID 0x2F621C2B0611518C at byte 8582 _____ -# This item is included in the report because it matches --limit. -# Scores: Apdex = 0.00 [0.1], V/M = 0.00 -# Query_time sparkline: | ^ | -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 50 100 -# Exec time 25 105s 1s 1s 1s 1s 30ms 1s -# Lock time 0 0 0 0 0 0 0 0 -# Rows sent 50 100 1 1 1 1 0 1 -# Rows examine 0 0 0 0 0 0 0 0 -# Query size 48 4.39k 45 45 45 45 0 45 -# String: -# Hosts localhost -# Users root -# Query_time distribution -# 1us -# 10us -# 100us -# 1ms -# 10ms -# 100ms -# 1s ################################################################ -# 10s+ -# Tables -# SHOW TABLE STATUS LIKE 'steady_table'\G -# SHOW CREATE TABLE `steady_table`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select less_variable_column from steady_table\G - -# Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============== ===== ====== ==== ===== ========= -# 1 0x32B0659E6D13E5A2 308.4675 74.6% 100 3.0847 0.00 0.48 SELECT unsteady_table -# 2 0x2F621C2B0611518C 104.9344 25.4% 100 1.0493 0.00 0.00 SELECT steady_table diff --git a/t/pt-query-digest/samples/slow052.txt b/t/pt-query-digest/samples/slow052.txt index 5827481f..a67e5793 100644 --- a/t/pt-query-digest/samples/slow052.txt +++ b/t/pt-query-digest/samples/slow052.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x32B0659E6D13E5A2 at byte 16849 ____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 0.38 [1.0], V/M = 0.48 +# Scores: V/M = 0.48 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -31,7 +31,7 @@ select very_variable_column from unsteady_table\G # Query 2: 0 QPS, 0x concurrency, ID 0x2F621C2B0611518C at byte 8582 _____ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 0.72 [1.0], V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -60,7 +60,7 @@ select very_variable_column from unsteady_table\G select less_variable_column from steady_table\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============== ===== ====== ==== ===== ========= -# 1 0x32B0659E6D13E5A2 308.4675 74.6% 100 3.0847 0.38 0.48 SELECT unsteady_table -# 2 0x2F621C2B0611518C 104.9344 25.4% 100 1.0493 0.72 0.00 SELECT steady_table +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============== ===== ====== ===== ============== +# 1 0x32B0659E6D13E5A2 308.4675 74.6% 100 3.0847 0.48 SELECT unsteady_table +# 2 0x2F621C2B0611518C 104.9344 25.4% 100 1.0493 0.00 SELECT steady_table diff --git a/t/pt-query-digest/samples/slow053.txt b/t/pt-query-digest/samples/slow053.txt index c97655da..9a115bc1 100644 --- a/t/pt-query-digest/samples/slow053.txt +++ b/t/pt-query-digest/samples/slow053.txt @@ -1,7 +1,7 @@ # Query 1: 2 QPS, 1.90x concurrency, ID 0xA4EAD36B5CEB1C13 at byte 1044 __ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.01 +# Scores: V/M = 0.01 # Query_time sparkline: | ^^ | # Time range: 2011-02-08 12:00:09 to 12:00:10 # Attribute pct total min max avg 95% stddev median @@ -32,7 +32,7 @@ SELECT * FROM blah WHERE id IS NOT NULL\G # Query 2: 1.50 QPS, 0.03x concurrency, ID 0xAC0EC652760FEEB3 at byte 913 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.03 +# Scores: V/M = 0.03 # Query_time sparkline: | ^ _ | # Time range: 2011-02-08 12:00:06 to 12:00:08 # Attribute pct total min max avg 95% stddev median @@ -63,7 +63,7 @@ SELECT * FROM bar WHERE id=12\G # Query 3: 1.25 QPS, 0.00x concurrency, ID 0xBB11C6B7F3BAAB30 at byte 521 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2011-02-08 12:00:01 to 12:00:05 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow054.txt b/t/pt-query-digest/samples/slow054.txt index 777f8908..0c56aafe 100644 --- a/t/pt-query-digest/samples/slow054.txt +++ b/t/pt-query-digest/samples/slow054.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xBB11C6B7F3BAAB30 at byte 1058 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2011-02-08 12:00:01 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/slow055.txt b/t/pt-query-digest/samples/slow055.txt index c920a1ee..b6a8779b 100644 --- a/t/pt-query-digest/samples/slow055.txt +++ b/t/pt-query-digest/samples/slow055.txt @@ -5,7 +5,7 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xE9800998ECF8427E at byte 420 _______ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.01 +# Scores: V/M = 0.01 # Query_time sparkline: |^ ^ ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow056.txt b/t/pt-query-digest/samples/slow056.txt index 8e6571d3..1643ae9b 100644 --- a/t/pt-query-digest/samples/slow056.txt +++ b/t/pt-query-digest/samples/slow056.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x54E0BB9E70EAA792 at byte 596 ______ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2012-11-23 19:56:06 # Attribute pct total min max avg 95% stddev median @@ -38,7 +38,7 @@ select b = b + 30 from t where user_id=1\G # Query 2: 0 QPS, 0x concurrency, ID 0xE9800998ECF8427E at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2012-11-23 19:56:06 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump001.txt b/t/pt-query-digest/samples/tcpdump001.txt index cc5de25d..856c2dd6 100644 --- a/t/pt-query-digest/samples/tcpdump001.txt +++ b/t/pt-query-digest/samples/tcpdump001.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xA3C9C49321D65C30 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 09:50:16.805123 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump002_report.txt b/t/pt-query-digest/samples/tcpdump002_report.txt index 111be585..820740b7 100644 --- a/t/pt-query-digest/samples/tcpdump002_report.txt +++ b/t/pt-query-digest/samples/tcpdump002_report.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 1470 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 11:00:13.118191 # Attribute pct total min max avg 95% stddev median @@ -29,7 +29,7 @@ administrator command: Connect\G # Query 2: 0 QPS, 0x concurrency, ID 0xE3A3649C5FAC418D at byte 2449 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 11:00:13.118643 # Attribute pct total min max avg 95% stddev median @@ -58,7 +58,7 @@ select @@version_comment limit 1\G # Query 3: 0 QPS, 0x concurrency, ID 0xAE5A83B27932AB98 at byte 3298 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 11:00:13.119079 # Attribute pct total min max avg 95% stddev median @@ -87,7 +87,7 @@ select "paris in the the spring" as trick\G # Query 4: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 4186 _____ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2009-04-12 11:00:13.119487 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump003.txt b/t/pt-query-digest/samples/tcpdump003.txt index d404e3b6..973492a3 100644 --- a/t/pt-query-digest/samples/tcpdump003.txt +++ b/t/pt-query-digest/samples/tcpdump003.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 1455 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 12:41:46.357853 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump012.txt b/t/pt-query-digest/samples/tcpdump012.txt index cc5de25d..856c2dd6 100644 --- a/t/pt-query-digest/samples/tcpdump012.txt +++ b/t/pt-query-digest/samples/tcpdump012.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xA3C9C49321D65C30 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 09:50:16.805123 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump017_report.txt b/t/pt-query-digest/samples/tcpdump017_report.txt index 904558df..bd81c04c 100644 --- a/t/pt-query-digest/samples/tcpdump017_report.txt +++ b/t/pt-query-digest/samples/tcpdump017_report.txt @@ -10,7 +10,7 @@ # Query 1: 2.13 QPS, 0.36x concurrency, ID 0xE3A3649C5FAC418D at byte 2548 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.19 +# Scores: V/M = 0.19 # Query_time sparkline: | ^ ^ | # Time range: 2009-04-12 11:00:13.118643 to 11:00:14.999999 # Attribute pct total min max avg 95% stddev median @@ -36,6 +36,6 @@ select @@version_comment limit 1\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ====== -# 1 0xE3A3649C5FAC418D 0.6696 100.0% 4 0.1674 1.00 0.19 SELECT +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ====== +# 1 0xE3A3649C5FAC418D 0.6696 100.0% 4 0.1674 0.19 SELECT diff --git a/t/pt-query-digest/samples/tcpdump021.txt b/t/pt-query-digest/samples/tcpdump021.txt index 7f5dbb6d..5348e285 100644 --- a/t/pt-query-digest/samples/tcpdump021.txt +++ b/t/pt-query-digest/samples/tcpdump021.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA8E9FA785927259 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637394 # Attribute pct total min max avg 95% stddev median @@ -34,7 +34,7 @@ SELECT i FROM d.t WHERE i=?\G # Query 2: 0 QPS, 0x concurrency, ID 0x3F79759E7FA2F117 at byte 1106 _____ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637892 # Attribute pct total min max avg 95% stddev median @@ -69,7 +69,7 @@ SELECT i FROM d.t WHERE i="3"\G # Query 3: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 1850 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: all events occurred at 2009-12-08 09:23:49.638381 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump022.txt b/t/pt-query-digest/samples/tcpdump022.txt index 04fe6b9e..ed0544c3 100644 --- a/t/pt-query-digest/samples/tcpdump022.txt +++ b/t/pt-query-digest/samples/tcpdump022.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xC30A1A850F4E510F at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 13:41:12.811188 # Attribute pct total min max avg 95% stddev median @@ -34,7 +34,7 @@ SELECT i,j FROM d.t2 WHERE i=? AND j=?\G # Query 2: 0 QPS, 0x concurrency, ID 0x26EEAE2EADD904A1 at byte 1330 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 13:41:12.811591 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump023.txt b/t/pt-query-digest/samples/tcpdump023.txt index 34b31182..b5a833e4 100644 --- a/t/pt-query-digest/samples/tcpdump023.txt +++ b/t/pt-query-digest/samples/tcpdump023.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E77A2947B4BC375 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:14:55.951863 # Attribute pct total min max avg 95% stddev median @@ -34,7 +34,7 @@ SELECT * FROM d.t3 WHERE v=? OR c=? OR f=?\G # Query 2: 0 QPS, 0x concurrency, ID 0xA0B1C345E8654C18 at byte 1540 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:14:55.952344 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump024.txt b/t/pt-query-digest/samples/tcpdump024.txt index 68a53f11..2bb6d368 100644 --- a/t/pt-query-digest/samples/tcpdump024.txt +++ b/t/pt-query-digest/samples/tcpdump024.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E77A2947B4BC375 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:33:13.711351 # Attribute pct total min max avg 95% stddev median @@ -34,7 +34,7 @@ SELECT * FROM d.t3 WHERE v=? OR c=? OR f=?\G # Query 2: 0 QPS, 0x concurrency, ID 0xA0B1C345E8654C18 at byte 1540 _____ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:33:13.711642 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump025.txt b/t/pt-query-digest/samples/tcpdump025.txt index 462c7908..921a5e50 100644 --- a/t/pt-query-digest/samples/tcpdump025.txt +++ b/t/pt-query-digest/samples/tcpdump025.txt @@ -1,7 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x72B6E5BC2632931C at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:44:52.709181 # Attribute pct total min max avg 95% stddev median @@ -34,7 +34,7 @@ SELECT * FROM d.t WHERE 1 LIMIT 1;\G # Query 2: 0 QPS, 0x concurrency, ID 0xDDF5E71E9A66B752 at byte 1014 _____ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:44:52.709597 # Attribute pct total min max avg 95% stddev median diff --git a/t/pt-query-digest/samples/tcpdump033.txt b/t/pt-query-digest/samples/tcpdump033.txt index 08a06069..c91bcb08 100644 --- a/t/pt-query-digest/samples/tcpdump033.txt +++ b/t/pt-query-digest/samples/tcpdump033.txt @@ -12,7 +12,7 @@ # Query 1: 2.03k QPS, 0.28x concurrency, ID 0x6EE88728F6F29C72 at byte 800 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-12-18 08:44:07.235011 to 08:44:07.238467 # Attribute pct total min max avg 95% stddev median @@ -47,7 +47,7 @@ select * from d.t where name="adam"\G # Query 2: 1.17k QPS, 0.19x concurrency, ID 0xECBCD0412B5E497A at byte 9215 # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: 2009-12-18 08:44:07.234727 to 08:44:07.238999 # Attribute pct total min max avg 95% stddev median @@ -80,7 +80,7 @@ select * from d.t where name="daniel"\G # Query 3: 1.70k QPS, 0x concurrency, ID 0x559914DA8A7B7F28 at byte 8202 _ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | | # Time range: 2009-12-18 08:44:07.236509 to 08:44:07.238274 # Attribute pct total min max avg 95% stddev median @@ -105,11 +105,11 @@ select * from d.t where name="daniel"\G DEALLOCATE PREPARE 4\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0x6EE88728F6F29C72 0.0010 54.4% 7 0.0001 1.00 0.00 SELECT d.t -# 2 0xECBCD0412B5E497A 0.0008 45.6% 5 0.0002 1.00 0.00 SELECT d.t -# 3 0x559914DA8A7B7F28 0.0000 0.0% 3 0.0000 1.00 0.00 +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0x6EE88728F6F29C72 0.0010 54.4% 7 0.0001 0.00 SELECT d.t +# 2 0xECBCD0412B5E497A 0.0008 45.6% 5 0.0002 0.00 SELECT d.t +# 3 0x559914DA8A7B7F28 0.0000 0.0% 3 0.0000 0.00 # Prepared statements # Rank Query ID PREP PREP Response EXEC EXEC Response Item diff --git a/t/pt-query-digest/samples/tcpdump041.txt b/t/pt-query-digest/samples/tcpdump041.txt index 09edac73..9dd24427 100644 --- a/t/pt-query-digest/samples/tcpdump041.txt +++ b/t/pt-query-digest/samples/tcpdump041.txt @@ -10,7 +10,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA8E9FA785927259 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 +# Scores: V/M = 0.00 # Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637394 # Attribute pct total min max avg 95% stddev median @@ -42,9 +42,9 @@ PREPARE SELECT i FROM d.t WHERE i=?\G SELECT i FROM d.t WHERE i=?\G # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M Item -# ==== ================== ============= ===== ====== ==== ===== ========== -# 1 0xAA8E9FA785927259 0.0003 100.0% 1 0.0003 1.00 0.00 SELECT d.t +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========== +# 1 0xAA8E9FA785927259 0.0003 100.0% 1 0.0003 0.00 SELECT d.t # Prepared statements # Rank Query ID PREP PREP Response EXEC EXEC Response Item diff --git a/t/pt-query-digest/slowlog_analyses.t b/t/pt-query-digest/slowlog_analyses.t index 978d3994..a7c612fa 100644 --- a/t/pt-query-digest/slowlog_analyses.t +++ b/t/pt-query-digest/slowlog_analyses.t @@ -391,7 +391,6 @@ ok( # ############################################################################# # Issue 1124: Make mk-query-digest profile include variance-to-mean ratio -# Issue 1054: Add Apdex scores to mk-query-digest report # ############################################################################# ok( no_diff( @@ -401,14 +400,6 @@ ok( 'Analysis for slow052 (Apdex and V/M)', ); -ok( - no_diff( - sub { pt_query_digest::main(@args, qw(--apdex-t 0.1), '--report-format', 'query_report,profile', $sample.'slow052.txt') }, - "t/pt-query-digest/samples/slow052-apdex-t-0.1.txt", - ), - 'Analysis for slow052 (Apdex T = 0.1)', -); - # ############################################################################# # Bug 821694: pt-query-digest doesn't recognize hex InnoDB txn IDs # ############################################################################# From 5495945c7e286e5e959d794d1be54f262cd60780 Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Fri, 11 Jan 2013 13:24:02 -0300 Subject: [PATCH 03/34] EventAggregator: Remove apdex code --- lib/EventAggregator.pm | 77 ----------------------------------------- t/lib/EventAggregator.t | 62 
++------------------------------- 2 files changed, 3 insertions(+), 136 deletions(-) diff --git a/lib/EventAggregator.pm b/lib/EventAggregator.pm index c1353016..8c06c12d 100644 --- a/lib/EventAggregator.pm +++ b/lib/EventAggregator.pm @@ -619,16 +619,6 @@ sub calculate_statistical_metrics { $classes->{$class}->{$attrib}->{all}, $classes->{$class}->{$attrib} ); - - # Apdex (http://code.google.com/p/maatkit/issues/detail?id=1054) - if ( $args{apdex_t} && $attrib eq 'Query_time' ) { - $class_metrics->{$class}->{$attrib}->{apdex_t} = $args{apdex_t}; - $class_metrics->{$class}->{$attrib}->{apdex} - = $self->calculate_apdex( - t => $args{apdex_t}, - samples => $classes->{$class}->{$attrib}->{all}, - ); - } } } } @@ -784,9 +774,6 @@ sub metrics { median => $metrics->{classes}->{$where}->{$attrib}->{median} || 0, pct_95 => $metrics->{classes}->{$where}->{$attrib}->{pct_95} || 0, stddev => $metrics->{classes}->{$where}->{$attrib}->{stddev} || 0, - - apdex_t => $metrics->{classes}->{$where}->{$attrib}->{apdex_t}, - apdex => $metrics->{classes}->{$where}->{$attrib}->{apdex}, }; } @@ -1164,70 +1151,6 @@ sub _deep_copy_attrib_vals { return $copy; } -# Sub: calculate_apdex -# Calculate the Apdex score for the given T and response times. -# -# -# Parameters: -# %args - Arguments -# -# Required Arguments: -# t - Target threshold -# samples - Hashref with bucketized response time values, -# i.e. { bucket_number => n_responses, } -# -# Returns: -# Apdex score -sub calculate_apdex { - my ( $self, %args ) = @_; - my @required_args = qw(t samples); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my ($t, $samples) = @args{@required_args}; - - if ( $t <= 0 ) { - die "Invalid target threshold (T): $t. T must be greater than zero"; - } - - my $f = 4 * $t; - PTDEBUG && _d("Apdex T =", $t, "F =", $f); - - my $satisfied = 0; - my $tolerating = 0; - my $frustrated = 0; # just for debug output - my $n_samples = 0; - BUCKET: - for my $bucket ( keys %$samples ) { - my $n_responses = $samples->{$bucket}; - my $response_time = $buck_vals[$bucket]; - - # Response time increases from 0 to F. - # 0 --- T --- F - # ^ ^-- tolerating zone - # | - # +-------- satisfied zone - if ( $response_time <= $t ) { - $satisfied += $n_responses; - } - elsif ( $response_time <= $f ) { - $tolerating += $n_responses; - } - else { - $frustrated += $n_responses; - } - - $n_samples += $n_responses; - } - - my $apdex = sprintf('%.2f', ($satisfied + ($tolerating / 2)) / $n_samples); - PTDEBUG && _d($n_samples, "samples,", $satisfied, "satisfied,", - $tolerating, "tolerating,", $frustrated, "frustrated, Apdex score:", - $apdex); - - return $apdex; -} - # Sub: _get_value # Get the value of the attribute (or one of its alternatives) from the event. # Undef is a valid value. 
If the attrib or none of its alternatives exist diff --git a/t/lib/EventAggregator.t b/t/lib/EventAggregator.t index 38a16ca2..7ee7e30a 100644 --- a/t/lib/EventAggregator.t +++ b/t/lib/EventAggregator.t @@ -9,7 +9,7 @@ BEGIN { use strict; use warnings FATAL => 'all'; use English qw(-no_match_vars); -use Test::More tests => 82; +use Test::More; use QueryRewriter; use EventAggregator; @@ -431,7 +431,7 @@ foreach my $event (@$events) { is_deeply( $ea->results, $result, 'user aggregation' ); is($ea->type_for('Query_time'), 'num', 'Query_time is numeric'); -$ea->calculate_statistical_metrics(apdex_t => 1); +$ea->calculate_statistical_metrics(); is_deeply( $ea->metrics( where => 'bob', @@ -446,8 +446,6 @@ is_deeply( median => '0.000682', stddev => 0, pct_95 => '0.000682', - apdex_t => 1, - apdex => '1.00', }, 'Got simple hash of metrics from metrics()', ); @@ -466,8 +464,6 @@ is_deeply( median => 0, stddev => 0, pct_95 => 0, - apdex_t => undef, - apdex => undef, }, 'It does not crash on metrics()', ); @@ -1816,59 +1812,6 @@ is_deeply( "Merge results" ); -# ############################################################################# -# Apdex -# ############################################################################# - -my $samples = { - 280 => 10, # 0.81623354758492 satisfy - 281 => 10, # 0.85704522496417 satisfy - 282 => 10, # 0.89989748621238 satisfy - 283 => 50, # 0.94489236052300 satisfy - 284 => 50, # 0.99213697854915 satisfy - 285 => 10, # 1.04174382747661 tolerate - 290 => 10, # 1.32955843985657 tolerate - 313 => 1, # 4.08377033290049 frustrated -}; -my $apdex = $ea->calculate_apdex( - t => 1, - samples => $samples, -); - -is( - $apdex, - '0.93', - "Apdex score" -); - -$samples = { - 0 => 150, -}; -$apdex = $ea->calculate_apdex( - t => 1, - samples => $samples, -); - -is( - $apdex, - '1.00', - "Apdex score 1.00" -); - -$samples = { - 400 => 150, -}; -$apdex = $ea->calculate_apdex( - t => 1, - samples => $samples, -); - -is( - $apdex, - '0.00', - "Apdex score 0.00" -); - # ############################################################################# # Special-case attribs called *_crc for mqd --variations. 
# ############################################################################# @@ -1953,4 +1896,5 @@ like( qr/Complete test coverage/, '_d() works' ); +done_testing; exit; From 4fc66a08d3eb9cdaa88d60f6e43bd91e96e18afa Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Fri, 11 Jan 2013 13:45:20 -0300 Subject: [PATCH 04/34] pqd: Remove sparklines --- bin/pt-query-digest | 191 +----------------- lib/ExplainAnalyzer.pm | 88 +------- lib/QueryReportFormatter.pm | 168 +-------------- t/lib/ExplainAnalyzer.t | 143 ------------- t/lib/QueryReportFormatter.t | 189 ----------------- .../QueryReportFormatter/report001.txt | 1 - .../QueryReportFormatter/report002.txt | 2 - .../QueryReportFormatter/report003.txt | 1 - .../QueryReportFormatter/report015.txt | 1 - .../QueryReportFormatter/report016.txt | 1 - .../QueryReportFormatter/report032.txt | 10 +- t/pt-query-digest/samples/binlog001.txt | 6 - t/pt-query-digest/samples/binlog002.txt | 3 - t/pt-query-digest/samples/genlog001.txt | 5 - t/pt-query-digest/samples/genlog002.txt | 2 - t/pt-query-digest/samples/genlog003.txt | 5 - t/pt-query-digest/samples/http_tcpdump002.txt | 10 - .../samples/issue_1196-output-5.6.txt | 8 +- t/pt-query-digest/samples/memc_tcpdump001.txt | 1 - t/pt-query-digest/samples/memc_tcpdump002.txt | 1 - t/pt-query-digest/samples/memc_tcpdump003.txt | 2 - .../memc_tcpdump003_report_key_print.txt | 1 - t/pt-query-digest/samples/memc_tcpdump004.txt | 2 - t/pt-query-digest/samples/memc_tcpdump005.txt | 1 - t/pt-query-digest/samples/memc_tcpdump006.txt | 1 - t/pt-query-digest/samples/memc_tcpdump007.txt | 1 - t/pt-query-digest/samples/memc_tcpdump008.txt | 1 - t/pt-query-digest/samples/memc_tcpdump009.txt | 1 - t/pt-query-digest/samples/memc_tcpdump010.txt | 1 - t/pt-query-digest/samples/rawlog001.txt | 2 - .../samples/slow001_distillreport.txt | 2 - t/pt-query-digest/samples/slow001_report.txt | 2 - .../samples/slow001_select_report.txt | 2 - .../samples/slow001_tablesreport.txt | 1 - .../samples/slow002-orderbynonexistent.txt | 7 - t/pt-query-digest/samples/slow002_iters_2.txt | 1 - .../samples/slow002_orderbyreport.txt | 2 - t/pt-query-digest/samples/slow002_report.txt | 7 - .../samples/slow002_report_filtered.txt | 1 - t/pt-query-digest/samples/slow003_report.txt | 1 - t/pt-query-digest/samples/slow004_report.txt | 1 - .../samples/slow006-order-by-re.txt | 2 - t/pt-query-digest/samples/slow006_AR_1.txt | 2 - t/pt-query-digest/samples/slow006_AR_2.txt | 1 - t/pt-query-digest/samples/slow006_AR_4.txt | 2 - t/pt-query-digest/samples/slow006_AR_5.txt | 1 - t/pt-query-digest/samples/slow006_report.txt | 2 - .../samples/slow007_explain_1-55.txt | 2 - .../samples/slow007_explain_2-51.txt | 2 - .../samples/slow007_explain_3.txt | 7 +- .../samples/slow007_explain_4.txt | 6 +- t/pt-query-digest/samples/slow008_report.txt | 3 - .../samples/slow010_reportbyfile.txt | 1 - t/pt-query-digest/samples/slow011_report.txt | 2 - t/pt-query-digest/samples/slow013_report.txt | 4 - .../slow013_report_fingerprint_user.txt | 2 - .../samples/slow013_report_limit.txt | 1 - .../samples/slow013_report_outliers.txt | 2 - .../samples/slow013_report_user.txt | 2 - t/pt-query-digest/samples/slow014_report.txt | 1 - t/pt-query-digest/samples/slow018_report.txt | 1 - t/pt-query-digest/samples/slow019_report.txt | 2 - .../samples/slow019_report_noza.txt | 2 - t/pt-query-digest/samples/slow023.txt | 1 - t/pt-query-digest/samples/slow024.txt | 3 - t/pt-query-digest/samples/slow028.txt | 1 - t/pt-query-digest/samples/slow032.txt | 1 - 
.../samples/slow033-precise-since-until.txt | 2 - .../samples/slow033-rtm-event-1h.txt | 1 - .../samples/slow033-rtm-event-25h.txt | 1 - .../samples/slow033-rtm-interval-1d.txt | 5 - .../samples/slow033-rtm-interval-30m.txt | 5 - .../slow033-rtm-interval-30s-3iter.txt | 3 - .../samples/slow033-rtm-interval-30s.txt | 6 - .../samples/slow033-since-Nd.txt | 2 - .../samples/slow033-since-yymmdd.txt | 2 - .../samples/slow033-since-yyyy-mm-dd.txt | 1 - .../samples/slow033-until-date.txt | 1 - ...r-by-Locktime-sum-with-Locktime-distro.txt | 5 - .../samples/slow034-order-by-Locktime-sum.txt | 5 - t/pt-query-digest/samples/slow035.txt | 2 - t/pt-query-digest/samples/slow037_report.txt | 1 - .../samples/slow042-show-all-host.txt | 1 - t/pt-query-digest/samples/slow042.txt | 1 - t/pt-query-digest/samples/slow048.txt | 1 - t/pt-query-digest/samples/slow049.txt | 3 - t/pt-query-digest/samples/slow050.txt | 1 - t/pt-query-digest/samples/slow051.txt | 1 - t/pt-query-digest/samples/slow052.txt | 2 - t/pt-query-digest/samples/slow053.txt | 3 - t/pt-query-digest/samples/slow054.txt | 1 - t/pt-query-digest/samples/slow055.txt | 1 - t/pt-query-digest/samples/slow056.txt | 2 - t/pt-query-digest/samples/tcpdump001.txt | 1 - .../samples/tcpdump002_report.txt | 4 - t/pt-query-digest/samples/tcpdump003.txt | 1 - t/pt-query-digest/samples/tcpdump012.txt | 1 - .../samples/tcpdump017_report.txt | 1 - t/pt-query-digest/samples/tcpdump021.txt | 3 - t/pt-query-digest/samples/tcpdump022.txt | 2 - t/pt-query-digest/samples/tcpdump023.txt | 2 - t/pt-query-digest/samples/tcpdump024.txt | 2 - t/pt-query-digest/samples/tcpdump025.txt | 2 - t/pt-query-digest/samples/tcpdump033.txt | 3 - t/pt-query-digest/samples/tcpdump041.txt | 1 - 105 files changed, 18 insertions(+), 996 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index b09e5272..eebec5d5 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -1768,6 +1768,9 @@ sub parse_timestamp { . (defined $f ? '%09.6f' : '%02d'), $y + 2000, $m, $d, $h, $i, (defined $f ? $s + $f : $s); } + elsif ( $val =~ m/^$proper_ts$/ ) { + return $val; + } return $val; } @@ -6699,28 +6702,6 @@ sub event_report { ); } - if ( $o->get('explain') && $results->{samples}->{$item}->{arg} ) { - eval { - my $sparkline = $self->explain_sparkline( - $results->{samples}->{$item}->{arg}, $args{db}); - push @result, "# EXPLAIN sparkline: $sparkline\n"; - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d("Failed to get EXPLAIN sparkline:", $EVAL_ERROR); - } - } - - if ( my $attrib = $o->get('report-histogram') ) { - my $sparkline = $self->distro_sparkline( - %args, - attrib => $attrib, - item => $item, - ); - if ( $sparkline ) { - push @result, "# $attrib sparkline: |$sparkline|"; - } - } - if ( my $ts = $store->{ts} ) { my $time_range = $self->format_time_range($ts) || "unknown"; push @result, "# Time range: $time_range"; @@ -6857,73 +6838,6 @@ sub chart_distro { return join("\n", @results) . "\n"; } - -sub distro_sparkline { - my ( $self, %args ) = @_; - foreach my $arg ( qw(ea item attrib) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $ea = $args{ea}; - my $item = $args{item}; - my $attrib = $args{attrib}; - - my $results = $ea->results(); - my $store = $results->{classes}->{$item}->{$attrib}; - my $vals = $store->{all}; - - my $all_zeros_sparkline = " " x 8; - - return $all_zeros_sparkline unless defined $vals && scalar %$vals; - - my @buck_tens = $ea->buckets_of(10); - my @distro = map { 0 } (0 .. 
7); - my @buckets = map { 0 } (0..999); - map { $buckets[$_] = $vals->{$_} } keys %$vals; - $vals = \@buckets; - map { $distro[$buck_tens[$_]] += $vals->[$_] } (1 .. @$vals - 1); - - my $vals_per_mark; - my $max_val = 0; - my $max_disp_width = 64; - foreach my $n_vals ( @distro ) { - $max_val = $n_vals if $n_vals > $max_val; - } - $vals_per_mark = $max_val / $max_disp_width; - - my ($min, $max); - foreach my $i ( 0 .. $#distro ) { - my $n_vals = $distro[$i]; - my $n_marks = $n_vals / ($vals_per_mark || 1); - $n_marks = 1 if $n_marks < 1 && $n_vals > 0; - - $min = $n_marks if $n_marks && (!$min || $n_marks < $min); - $max = $n_marks if !$max || $n_marks > $max; - } - return $all_zeros_sparkline unless $min && $max; - - - $min = 0 if $min == $max; - my @range_min; - my $d = floor((($max+0.00001)-$min) / 4); - for my $x ( 1..4 ) { - push @range_min, $min + ($d * $x); - } - - my $sparkline = ""; - foreach my $i ( 0 .. $#distro ) { - my $n_vals = $distro[$i]; - my $n_marks = $n_vals / ($vals_per_mark || 1); - $n_marks = 1 if $n_marks < 1 && $n_vals > 0; - $sparkline .= $n_marks <= 0 ? ' ' - : $n_marks <= $range_min[0] ? '_' - : $n_marks <= $range_min[1] ? '.' - : $n_marks <= $range_min[2] ? '-' - : '^'; - } - - return $sparkline; -} - sub profile { my ( $self, %args ) = @_; foreach my $arg ( qw(ea worst groupby) ) { @@ -6959,19 +6873,6 @@ sub profile { vmr => ($query_time->{stddev}**2) / ($query_time->{avg} || 1), ); - if ( $o->get('explain') && $samp_query ) { - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} - : undef; - eval { - $profile{explain_sparkline} = $self->explain_sparkline( - $samp_query, $default_db); - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d("Failed to get EXPLAIN sparkline:", $EVAL_ERROR); - } - } - push @profiles, \%profile; } @@ -6988,7 +6889,6 @@ sub profile { { name => 'Calls', right_justify => 1, }, { name => 'R/Call', right_justify => 1, }, { name => 'V/M', right_justify => 1, width => 5, }, - ( $o->get('explain') ? { name => 'EXPLAIN' } : () ), { name => 'Item', }, ); $report->set_columns(@cols); @@ -7005,7 +6905,6 @@ sub profile { $item->{cnt}, $rc, $vmr, - ( $o->get('explain') ? $item->{explain_sparkline} || "" : () ), $item->{sample}, ); $report->add_line(@vals); @@ -7032,7 +6931,6 @@ sub profile { $misc->{cnt}, $rc, '0.0', # variance-to-mean ratio is not meaningful here - ( $o->get('explain') ? "MISC" : () ), "<".scalar @$other." ITEMS>", ); } @@ -7417,34 +7315,6 @@ sub format_time_range { return $min && $max ? "$min to $max" : ''; } -sub explain_sparkline { - my ( $self, $query, $db ) = @_; - return unless $query; - - my $q = $self->{Quoter}; - my $dbh = $self->{dbh}; - my $ex = $self->{ExplainAnalyzer}; - return unless $dbh && $ex; - - if ( $db ) { - PTDEBUG && _d($dbh, "USE", $db); - $dbh->do("USE " . 
$q->quote($db)); - } - my $res = $ex->normalize( - $ex->explain_query( - dbh => $dbh, - query => $query, - ) - ); - - my $sparkline; - if ( $res ) { - $sparkline = $ex->sparkline(explain => $res); - } - - return $sparkline; -} - sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } @@ -11393,61 +11263,6 @@ sub fingerprint { my ($explain) = @args{@required_args}; } -sub sparkline { - my ( $self, %args ) = @_; - my @required_args = qw(explain); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($explain) = @args{@required_args}; - PTDEBUG && _d("Making sparkline for", Dumper($explain)); - - my $access_code = { - 'ALL' => 'a', - 'const' => 'c', - 'eq_ref' => 'e', - 'fulltext' => 'f', - 'index' => 'i', - 'index_merge' => 'm', - 'range' => 'n', - 'ref_or_null' => 'o', - 'ref' => 'r', - 'system' => 's', - 'unique_subquery' => 'u', - }; - - my $sparkline = ''; - my ($T, $F); # Using temporary, Using filesort - - foreach my $tbl ( @$explain ) { - my $code; - if ( defined $tbl->{type} ) { - $code = $access_code->{$tbl->{type}} || "?"; - $code = uc $code if $tbl->{Extra}->{'Using index'}; - } - else { - $code = '-' - }; - $sparkline .= $code; - - $T = 1 if $tbl->{Extra}->{'Using temporary'}; - $F = 1 if $tbl->{Extra}->{'Using filesort'}; - } - - if ( $T || $F ) { - if ( $explain->[-1]->{Extra}->{'Using temporary'} - || $explain->[-1]->{Extra}->{'Using filesort'} ) { - $sparkline .= ">" . ($T ? "T" : "") . ($F ? "F" : ""); - } - else { - $sparkline = ($T ? "T" : "") . ($F ? "F" : "") . ">$sparkline"; - } - } - - PTDEBUG && _d("sparkline:", $sparkline); - return $sparkline; -} - sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } diff --git a/lib/ExplainAnalyzer.pm b/lib/ExplainAnalyzer.pm index cf04d0c6..9dc32bd9 100644 --- a/lib/ExplainAnalyzer.pm +++ b/lib/ExplainAnalyzer.pm @@ -215,7 +215,7 @@ sub save_usage_for { # explain - Hashref of normalized EXPLAIN data # # Returns: -# Fingerprint/sparkline string +# Fingerprint string sub fingerprint { my ( $self, %args ) = @_; my @required_args = qw(explain); @@ -225,92 +225,6 @@ sub fingerprint { my ($explain) = @args{@required_args}; } -# Sub: sparkline -# Create a sparkline of EXPLAIN data from . A spark line -# is a very compact, terse fingerprint that represents just the following. -# See . 
-# -# access (for each table): -# - a: ALL -# - c: const -# - e: eq_ref -# - f: fulltext -# - i: index -# - m: index_merge -# - n: range -# - o: ref_or_null -# - r: ref -# - s: system -# - u: unique_subquery -# -# Extra: -# - uppsercaes access code: Using extra -# - T: Using temprary -# - F: Using filesort -# -# Parameters: -# %args - Arguments -# -# Required Arguments: -# explain - Hashref of normalized EXPLAIN data -# -# Returns: -# Sparkline string like (start code)TF>Ree(end code) -sub sparkline { - my ( $self, %args ) = @_; - my @required_args = qw(explain); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($explain) = @args{@required_args}; - PTDEBUG && _d("Making sparkline for", Dumper($explain)); - - my $access_code = { - 'ALL' => 'a', - 'const' => 'c', - 'eq_ref' => 'e', - 'fulltext' => 'f', - 'index' => 'i', - 'index_merge' => 'm', - 'range' => 'n', - 'ref_or_null' => 'o', - 'ref' => 'r', - 'system' => 's', - 'unique_subquery' => 'u', - }; - - my $sparkline = ''; - my ($T, $F); # Using temporary, Using filesort - - foreach my $tbl ( @$explain ) { - my $code; - if ( defined $tbl->{type} ) { - $code = $access_code->{$tbl->{type}} || "?"; - $code = uc $code if $tbl->{Extra}->{'Using index'}; - } - else { - $code = '-' - }; - $sparkline .= $code; - - $T = 1 if $tbl->{Extra}->{'Using temporary'}; - $F = 1 if $tbl->{Extra}->{'Using filesort'}; - } - - if ( $T || $F ) { - if ( $explain->[-1]->{Extra}->{'Using temporary'} - || $explain->[-1]->{Extra}->{'Using filesort'} ) { - $sparkline .= ">" . ($T ? "T" : "") . ($F ? "F" : ""); - } - else { - $sparkline = ($T ? "T" : "") . ($F ? "F" : "") . ">$sparkline"; - } - } - - PTDEBUG && _d("sparkline:", $sparkline); - return $sparkline; -} - sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } diff --git a/lib/QueryReportFormatter.pm b/lib/QueryReportFormatter.pm index c0340fb8..33f99410 100644 --- a/lib/QueryReportFormatter.pm +++ b/lib/QueryReportFormatter.pm @@ -57,7 +57,6 @@ use constant MAX_STRING_LENGTH => 10; # QueryReview - object used in # dbh - dbh used in # ExplainAnalyzer - object used in . -# This causes a sparkline to be printed (issue 1141). # # Returns: # QueryReportFormatter object @@ -95,9 +94,7 @@ sub new { # Set a report formatter object for a report. By default this package will # instantiate ReportFormatter objects to format columnized reports (e.g. # for profile and prepared reports). Setting a caller-created formatter -# object (usually a obj) is used for tested and also by -# to extend the profile report line width to 82 for -# the --explain sparkline. +# object (usually a obj) is used for tests. # # Parameters: # %args - Arguments @@ -552,32 +549,6 @@ sub event_report { ); } - # Fourth line: EXPLAIN sparkline if --explain. - if ( $o->get('explain') && $results->{samples}->{$item}->{arg} ) { - eval { - my $sparkline = $self->explain_sparkline( - $results->{samples}->{$item}->{arg}, $args{db}); - push @result, "# EXPLAIN sparkline: $sparkline\n"; - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d("Failed to get EXPLAIN sparkline:", $EVAL_ERROR); - } - } - - if ( my $attrib = $o->get('report-histogram') ) { - my $sparkline = $self->distro_sparkline( - %args, - attrib => $attrib, - item => $item, - ); - if ( $sparkline ) { - # I find the | | bookends help make the sparkchart graph more clear. - # Else with just .^- it's difficult to tell where the chart beings - # or ends. 
- push @result, "# $attrib sparkline: |$sparkline|"; - } - } - # Last line before column headers: time range if ( my $ts = $store->{ts} ) { my $time_range = $self->format_time_range($ts) || "unknown"; @@ -736,98 +707,6 @@ sub chart_distro { return join("\n", @results) . "\n"; } - -# Sub: distro_sparkline -# Make a sparkline of the graph. The following -# character codes are used: _.-^ If a bucket doesn't have a value, a -# space is used. So _ buckets are the lowest lines on the full graph -# (), and ^ are the peaks on the full graph. See -# QueryReportFormatter.t for several examples. -# -# This sub isn't the most optimized. The first half is the same code -# as . Then the latter code, unique to this sub, -# essentially compresses the full chart further into 8 characters using -# the 4 char codes above. -# -# Parameters: -# %args - Arguments -# -# Required Arguments: -# ea - object -# item - Item in results to chart -# attrib - Attribute of item to chart -# -# Returns: -# Sparkchart string -sub distro_sparkline { - my ( $self, %args ) = @_; - foreach my $arg ( qw(ea item attrib) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $ea = $args{ea}; - my $item = $args{item}; - my $attrib = $args{attrib}; - - my $results = $ea->results(); - my $store = $results->{classes}->{$item}->{$attrib}; - my $vals = $store->{all}; - - my $all_zeros_sparkline = " " x 8; - - return $all_zeros_sparkline unless defined $vals && scalar %$vals; - - my @buck_tens = $ea->buckets_of(10); - my @distro = map { 0 } (0 .. 7); - my @buckets = map { 0 } (0..999); - map { $buckets[$_] = $vals->{$_} } keys %$vals; - $vals = \@buckets; - map { $distro[$buck_tens[$_]] += $vals->[$_] } (1 .. @$vals - 1); - - my $vals_per_mark; - my $max_val = 0; - my $max_disp_width = 64; - foreach my $n_vals ( @distro ) { - $max_val = $n_vals if $n_vals > $max_val; - } - $vals_per_mark = $max_val / $max_disp_width; - - my ($min, $max); - foreach my $i ( 0 .. $#distro ) { - my $n_vals = $distro[$i]; - my $n_marks = $n_vals / ($vals_per_mark || 1); - $n_marks = 1 if $n_marks < 1 && $n_vals > 0; - - $min = $n_marks if $n_marks && (!$min || $n_marks < $min); - $max = $n_marks if !$max || $n_marks > $max; - } - return $all_zeros_sparkline unless $min && $max; - - # That ^ code is mostly the same as chart_distro(). Now here's - # our own unique code. - - # Divide the range by 4 because there are 4 char codes: _.-^ - $min = 0 if $min == $max; - my @range_min; - my $d = floor((($max+0.00001)-$min) / 4); - for my $x ( 1..4 ) { - push @range_min, $min + ($d * $x); - } - - my $sparkline = ""; - foreach my $i ( 0 .. $#distro ) { - my $n_vals = $distro[$i]; - my $n_marks = $n_vals / ($vals_per_mark || 1); - $n_marks = 1 if $n_marks < 1 && $n_vals > 0; - $sparkline .= $n_marks <= 0 ? ' ' - : $n_marks <= $range_min[0] ? '_' - : $n_marks <= $range_min[1] ? '.' - : $n_marks <= $range_min[2] ? '-' - : '^'; - } - - return $sparkline; -} - # Profile subreport (issue 381). # Arguments: # * ea obj: EventAggregator @@ -873,20 +752,6 @@ sub profile { vmr => ($query_time->{stddev}**2) / ($query_time->{avg} || 1), ); - # Get EXPLAIN sparkline if --explain. - if ( $o->get('explain') && $samp_query ) { - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? 
keys %{$stats->{db}->{unq}} - : undef; - eval { - $profile{explain_sparkline} = $self->explain_sparkline( - $samp_query, $default_db); - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d("Failed to get EXPLAIN sparkline:", $EVAL_ERROR); - } - } - push @profiles, \%profile; } @@ -903,7 +768,6 @@ sub profile { { name => 'Calls', right_justify => 1, }, { name => 'R/Call', right_justify => 1, }, { name => 'V/M', right_justify => 1, width => 5, }, - ( $o->get('explain') ? { name => 'EXPLAIN' } : () ), { name => 'Item', }, ); $report->set_columns(@cols); @@ -920,7 +784,6 @@ sub profile { $item->{cnt}, $rc, $vmr, - ( $o->get('explain') ? $item->{explain_sparkline} || "" : () ), $item->{sample}, ); $report->add_line(@vals); @@ -949,7 +812,6 @@ sub profile { $misc->{cnt}, $rc, '0.0', # variance-to-mean ratio is not meaningful here - ( $o->get('explain') ? "MISC" : () ), "<".scalar @$other." ITEMS>", ); } @@ -1380,34 +1242,6 @@ sub format_time_range { return $min && $max ? "$min to $max" : ''; } -sub explain_sparkline { - my ( $self, $query, $db ) = @_; - return unless $query; - - my $q = $self->{Quoter}; - my $dbh = $self->{dbh}; - my $ex = $self->{ExplainAnalyzer}; - return unless $dbh && $ex; - - if ( $db ) { - PTDEBUG && _d($dbh, "USE", $db); - $dbh->do("USE " . $q->quote($db)); - } - my $res = $ex->normalize( - $ex->explain_query( - dbh => $dbh, - query => $query, - ) - ); - - my $sparkline; - if ( $res ) { - $sparkline = $ex->sparkline(explain => $res); - } - - return $sparkline; -} - sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } diff --git a/t/lib/ExplainAnalyzer.t b/t/lib/ExplainAnalyzer.t index 32ccc67f..161362cf 100644 --- a/t/lib/ExplainAnalyzer.t +++ b/t/lib/ExplainAnalyzer.t @@ -425,149 +425,6 @@ is_deeply( ], 'Got saved usage for 0xdeadbeef'); -# ############################################################################# -# Issue 1141: Add "spark charts" to mk-query-digest profile -# ############################################################################# -is( - $exa->sparkline(explain => - [ - { id => 1, - select_type => 'PRIMARY', - table => 'foo', - type => 'eq_ref', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => [], - rows => 100, - Extra => { - 'Using index' => 1, - 'Using where' => 1, - }, - }, - ], - ), - "E", - "sparkline: basic 1 table eq_ref" -); - -is( - $exa->sparkline(explain => - [ - { id => 1, - select_type => 'PRIMARY', - table => 'foo', - type => 'eq_ref', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => [], - rows => 100, - Extra => { - 'Using index' => 1, - 'Using where' => 1, - 'Using filesort' => 1, - }, - }, - { id => 2, - select_type => 'PRIMARY', - table => 'bar', - type => 'ref', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => ['foo.col'], - rows => 100, - Extra => { - }, - }, - ], - ), - "F>Er", - "sparkline: 2 table with filesort at start" -); - -is( - $exa->sparkline(explain => - [ - { id => 1, - select_type => 'PRIMARY', - table => 'foo', - type => 'range', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => [], - rows => 100, - Extra => { - }, - }, - { id => 2, - select_type => 'PRIMARY', - table => 'bar', - type => 'ref', - possible_keys => ['idx'], - key => ['idx'], - key_len => [10], - ref => ['foo.col'], - rows => 100, - Extra => { - 'Using temporary' => 1, - 'Using filesort' => 1, - }, - }, - ], - ), - "nr>TF", - "sparkline: 2 table with temp and filesort at end" -); - -is( - $exa->sparkline(explain 
=> - [ - { id => 1, - select_type => 'PRIMARY', - table => undef, - type => undef, - possible_keys => [], - key => [], - key_len => [], - ref => [], - rows => undef, - Extra => { - 'No tables used' => 1, - }, - }, - { id => 1, - select_type => 'UNION', - table => 'a', - type => 'index', - possible_keys => [], - key => ['PRIMARY'], - key_len => [2], - ref => [], - rows => 200, - Extra => { - 'Using index' => 1, - }, - }, - { id => undef, - select_type => 'UNION RESULT', - table => '', - type => 'ALL', - possible_keys => [], - key => [], - key_len => [], - ref => [], - rows => undef, - Extra => {}, - }, - ], - ), - "-Ia", - "sparkline: 3 tables, using index" -); - # ############################################################################# # Done. # ############################################################################# diff --git a/t/lib/QueryReportFormatter.t b/t/lib/QueryReportFormatter.t index 7677df36..a3834d4c 100644 --- a/t/lib/QueryReportFormatter.t +++ b/t/lib/QueryReportFormatter.t @@ -1175,21 +1175,7 @@ SKIP: { } $ea->calculate_statistical_metrics(); - # Make sure that explain_sparkline() does USE db like explain_report() - # does because by mqd defaults expalin_sparline() is called by profile() - # so if it doesn't USE db then the EXPLAIN will fail. Here we reset - # the db to something else because we already called explain_report() - # above which did USE qrf. - # - # 5.6 really is that different: ia vs. TF>aI. It's smarter. $dbh->do("USE mysql"); - my $explain_sparkline = $qrf->explain_sparkline($arg, 'qrf'); - is( - $explain_sparkline, - $sandbox_version eq '5.6' ? "ia" : "TF>aI", - "explain_sparkling() uses db" - ); - $report = new ReportFormatter( line_width => 82, extend_right => 1, @@ -1360,181 +1346,6 @@ ok( "Variance-to-mean ration (issue 1124)" ); -# ############################################################################# -# Issue 1141: Add "spark charts" to mk-query-digest profile -# ############################################################################# -sub proc_events { - my ( %args ) = @_; - my ($arg, $attrib, $vals) = @args{qw(arg attrib vals)}; - - my $bytes = length $arg; - my $fingerprint = $qr->fingerprint($arg); - - $events = []; - foreach my $val ( @$vals ) { - push @$events, { - bytes => $bytes, - arg => $arg, - fingerprint => $fingerprint, - $attrib => $val, - } - } - - $ea = new EventAggregator( - groupby => 'fingerprint', - worst => 'Query_time', - ); - foreach my $event (@$events) { - $ea->aggregate($event); - } - $ea->calculate_statistical_metrics(apdex_t=>1); - - # Seeing the full chart helps determine what the - # sparkline should look like. - if ( $args{chart} ) { - $result = $qrf->chart_distro( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', - ); - print $result; - } - - return; -}; - -# Test sparklines in isolation. 
-proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0 0 0)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - " ", - "Sparkchart line - all zeros" -); - -# 1us -# 10us -# 100us ################################################ -# 1ms ################################ -# 10ms ################################ -# 100ms ################################################################ -# 1s ################ -# 10s+ -proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0.100000 0.500000 0.000600 0.008000 0.990000 1.000000 0.400000 0.003000 0.000200 0.000100 0.010000 0.020000)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - " -..^_ ", - "Sparkchart line 1" -); - -# 1us -# 10us -# 100us -# 1ms -# 10ms ################################ -# 100ms ################################################################ -# 1s ######## -# 10s+ -proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0.01 0.03 0.08 0.09 0.3 0.5 0.5 0.6 0.7 0.5 0.5 0.9 1.0)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - " .^_ ", - "Sparkchart line 2" -); - -# 1us ################################################################ -# 10us ################################################################ -# 100us ################################################################ -# 1ms ################################################################ -# 10ms ################################################################ -# 100ms ################################################################ -# 1s ################################################################ -# 10s+ -proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0.000003 0.000030 0.000300 0.003000 0.030000 0.300000 3)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - "^^^^^^^ ", - "Sparkchart line - vals in all ranges except 10s+" -); - - -# 1us ################################################################ -# 10us ################################################################ -# 100us -# 1ms -# 10ms -# 100ms -# 1s ################################################################ -# 10s+ ################################################################ -proc_events( - arg => 'select c from t', - attrib => 'Query_time', - vals => [qw(0.000003 0.000030 0.000003 0.000030 3 3 30 30)], -); -$result = $qrf->distro_sparkline( - ea => $ea, - item => 'select c from t', - attrib => 'Query_time', -); -is( - $result, - "^^ ^^", - "Sparkchart line - twin peaks" -); - -# Test that that ^ sparkchart appears in the event header properly. 
-$result = $qrf->event_report( - ea => $ea, - select => [ qw(Query_time) ], - item => 'select c from t', - rank => 1, - orderby => 'Query_time', - reason => 'top', -); -ok( - no_diff( - $result, - "t/lib/samples/QueryReportFormatter/report028.txt", - cmd_output => 1, - ), - 'Sparkchart in event header' -); - # ############################################################################ # Bug 887688: Prepared statements crash pt-query-digest # ############################################################################ diff --git a/t/lib/samples/QueryReportFormatter/report001.txt b/t/lib/samples/QueryReportFormatter/report001.txt index 78c30c81..ceb1cfc0 100644 --- a/t/lib/samples/QueryReportFormatter/report001.txt +++ b/t/lib/samples/QueryReportFormatter/report001.txt @@ -8,7 +8,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5796997451B1FA1D at byte 123 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report002.txt b/t/lib/samples/QueryReportFormatter/report002.txt index c2c2710f..193d5acd 100644 --- a/t/lib/samples/QueryReportFormatter/report002.txt +++ b/t/lib/samples/QueryReportFormatter/report002.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x3F79759E7FA2F117 at byte 1106 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637892 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,7 +31,6 @@ SELECT i FROM d.t WHERE i="3"\G # Query 2: 0 QPS, 0x concurrency, ID 0xAA8E9FA785927259 at byte 0 ________ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637394 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report003.txt b/t/lib/samples/QueryReportFormatter/report003.txt index 58aa0a53..c87a0881 100644 --- a/t/lib/samples/QueryReportFormatter/report003.txt +++ b/t/lib/samples/QueryReportFormatter/report003.txt @@ -6,7 +6,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5796997451B1FA1D at byte 123 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/lib/samples/QueryReportFormatter/report015.txt b/t/lib/samples/QueryReportFormatter/report015.txt index f38ae401..382a228c 100644 --- a/t/lib/samples/QueryReportFormatter/report015.txt +++ b/t/lib/samples/QueryReportFormatter/report015.txt @@ -1,6 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 3 diff --git a/t/lib/samples/QueryReportFormatter/report016.txt b/t/lib/samples/QueryReportFormatter/report016.txt index 3a6fe8bc..64434e38 100644 --- a/t/lib/samples/QueryReportFormatter/report016.txt +++ b/t/lib/samples/QueryReportFormatter/report016.txt @@ -1,6 +1,5 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xEDEF654FCCC4A4D8 at byte 0 _________ # Scores: V/M = 0.00 -# 
Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 1 diff --git a/t/lib/samples/QueryReportFormatter/report032.txt b/t/lib/samples/QueryReportFormatter/report032.txt index fc82f664..f46eb104 100644 --- a/t/lib/samples/QueryReportFormatter/report032.txt +++ b/t/lib/samples/QueryReportFormatter/report032.txt @@ -1,14 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ===== ======= ========= -# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 0.00 ia SELECT t -# MISC 0xMISC 0.0003 100.0% 1 0.0003 0.0 MISC <1 ITEMS> +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ========= +# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 0.00 SELECT t +# MISC 0xMISC 0.0003 100.0% 1 0.0003 0.0 <1 ITEMS> # Query 1: 0 QPS, 0x concurrency, ID 0x46F81B022F1AD76B at byte 0 ________ # Scores: V/M = 0.00 -# EXPLAIN sparkline: ia -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637394 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/binlog001.txt b/t/pt-query-digest/samples/binlog001.txt index e17a903e..d4ac8514 100644 --- a/t/pt-query-digest/samples/binlog001.txt +++ b/t/pt-query-digest/samples/binlog001.txt @@ -16,7 +16,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xCD948EAF18BC614E at byte 953 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:08 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -53,7 +52,6 @@ replace into test4.tbl9(tbl5, day, todo, comment) # Query 2: 0 QPS, 0x concurrency, ID 0xC356FD9EFD7D799E at byte 605 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -90,7 +88,6 @@ select e.tblo = o.tblo, # Query 3: 0 QPS, 0x concurrency, ID 0xB5E55291C7DE1096 at byte 1469 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:50 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -127,7 +124,6 @@ select o.tbl2 = e.tbl2, # Query 4: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 146 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:50 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -157,7 +153,6 @@ BEGIN\G # Query 5: 0 QPS, 0x concurrency, ID 0xED69B13F3D0161D0 at byte 2479 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:53 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -192,7 +187,6 @@ select last2metric1 = last1metric1, last2time = last1time, # Query 6: 0 QPS, 0x concurrency, ID 0x79BFEA84D0CED05F at byte 1889 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: all events occurred at 2007-12-07 12:02:53 # Attribute pct total min max 
avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/binlog002.txt b/t/pt-query-digest/samples/binlog002.txt index c5edf880..3e1426a2 100644 --- a/t/pt-query-digest/samples/binlog002.txt +++ b/t/pt-query-digest/samples/binlog002.txt @@ -17,7 +17,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xF25D6D5AC7C18FF3 at byte 381 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2009-07-22 07:21:59 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -46,7 +45,6 @@ create database d\G # Query 2: 0 QPS, 0x concurrency, ID 0x03409022EB8A4AE7 at byte 795 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2009-07-22 07:22:16 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -73,7 +71,6 @@ create table foo (i int)\G # Query 3: 0 QPS, 0x concurrency, ID 0xF579EC4A9633EEA0 at byte 973 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2009-07-22 07:22:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/genlog001.txt b/t/pt-query-digest/samples/genlog001.txt index 1c0419c9..63b9ce91 100644 --- a/t/pt-query-digest/samples/genlog001.txt +++ b/t/pt-query-digest/samples/genlog001.txt @@ -8,7 +8,6 @@ # Query 1: 0.00 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 244 ___ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: 2005-10-07 21:55:24 to 2006-12-26 15:42:36 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,7 +31,6 @@ administrator command: Connect\G # Query 2: 0.00 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 464 ___ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: 2005-10-07 21:55:24 to 2006-12-26 16:44:48 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -54,7 +52,6 @@ administrator command: Quit\G # Query 3: 0 QPS, 0x concurrency, ID 0x4D096479916B0F45 at byte 346 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2006-12-26 15:42:36 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -80,7 +77,6 @@ SELECT DISTINCT col FROM tbl WHERE foo=20061219\G # Query 4: 0 QPS, 0x concurrency, ID 0x44AAC79F41BCF692 at byte 58 _______ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -109,7 +105,6 @@ SELECT foo # Query 5: 0 QPS, 0x concurrency, ID 0x44AE35A182869033 at byte 300 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2006-12-26 15:42:36 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git 
a/t/pt-query-digest/samples/genlog002.txt b/t/pt-query-digest/samples/genlog002.txt index ca1b392f..fb0937bc 100644 --- a/t/pt-query-digest/samples/genlog002.txt +++ b/t/pt-query-digest/samples/genlog002.txt @@ -9,7 +9,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x2361B36A4AEB397B at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2010-02-11 00:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -36,7 +35,6 @@ SELECT category_id # Query 2: 0 QPS, 0x concurrency, ID 0x0A3E6DCD23F3445A at byte 237 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2010-02-11 00:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/genlog003.txt b/t/pt-query-digest/samples/genlog003.txt index c1c408e9..98de2ae3 100644 --- a/t/pt-query-digest/samples/genlog003.txt +++ b/t/pt-query-digest/samples/genlog003.txt @@ -8,7 +8,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 246 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,7 +31,6 @@ administrator command: Connect\G # Query 2: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 466 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -54,7 +52,6 @@ administrator command: Quit\G # Query 3: 0 QPS, 0x concurrency, ID 0x4D096479916B0F45 at byte 348 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -80,7 +77,6 @@ SELECT DISTINCT col FROM tbl WHERE foo=20061219\G # Query 4: 0 QPS, 0x concurrency, ID 0x44AAC79F41BCF692 at byte 60 _______ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -109,7 +105,6 @@ SELECT foo # Query 5: 0 QPS, 0x concurrency, ID 0x44AE35A182869033 at byte 302 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2005-10-07 21:55:24 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/http_tcpdump002.txt b/t/pt-query-digest/samples/http_tcpdump002.txt index 5264d58a..8fdd14fc 100644 --- a/t/pt-query-digest/samples/http_tcpdump002.txt +++ b/t/pt-query-digest/samples/http_tcpdump002.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xFB0C089DD4451762 at byte 59213 ____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.411349 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= 
======= ======= @@ -26,7 +25,6 @@ get www.percona.com/images/menu_our-vision.gif # Query 2: 0 QPS, 0x concurrency, ID 0x7C3AA9143C98C14A at byte 206 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.074855 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -51,7 +49,6 @@ get www.percona.com/about-us.html # Query 3: 0 QPS, 0x concurrency, ID 0x7CC09CE55CB7750C at byte 16362 ____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.157215 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -76,7 +73,6 @@ get www.percona.com/js/jquery.js # Query 4: 0 QPS, 0x concurrency, ID 0x44C0C94594575296 at byte 65644 ____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.420851 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -101,7 +97,6 @@ get www.percona.com/images/bg-gray-corner-top.gif # Query 5: 0 QPS, 0x concurrency, ID 0x08207FBDE8A42C36 at byte 67956 ____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.420996 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -126,7 +121,6 @@ get www.percona.com/images/handshake.jpg # Query 6: 0 QPS, 0x concurrency, ID 0x4F1E2B5E822F55B8 at byte 53100 ____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.346763 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -151,7 +145,6 @@ get www.percona.com/images/menu_team.gif # Query 7: 0 QPS, 0x concurrency, ID 0x7FB624EE10D71E1F at byte 170117 ___ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:14.737890 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -176,7 +169,6 @@ get hit.clickaider.com/s/forms.js # Query 8: 0 QPS, 0x concurrency, ID 0x1279DE4968C95A8D at byte 147447 ___ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:14.536149 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -201,7 +193,6 @@ get hit.clickaider.com/clickaider.js # Query 9: 0 QPS, 0x concurrency, ID 0x590BE2A84B8F0D5B at byte 167245 ___ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:14.678713 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -226,7 +217,6 @@ get hit.clickaider.com/pv?lng=140&&lnks=&t=About%20Percona&c=73a41b95-2926&r=htt # Query 10: 0 QPS, 0x concurrency, ID 0xFC5C4A690D695F35 at byte 55942 ___ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-11-09 15:31:09.373800 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/issue_1196-output-5.6.txt 
b/t/pt-query-digest/samples/issue_1196-output-5.6.txt index 945df0d5..860ec6b5 100644 --- a/t/pt-query-digest/samples/issue_1196-output-5.6.txt +++ b/t/pt-query-digest/samples/issue_1196-output-5.6.txt @@ -1,14 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ===== ======= ======== -# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 0.00 TF>aa SELECT t +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ======== +# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 0.00 SELECT t # Query 1: 0 QPS, 0x concurrency, ID 0xD4B6A5CD2F2F485C at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# EXPLAIN sparkline: TF>aa -# Query_time sparkline: | ^ | # Time range: all events occurred at 2010-12-14 16:12:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump001.txt b/t/pt-query-digest/samples/memc_tcpdump001.txt index ea59f953..e300ad76 100644 --- a/t/pt-query-digest/samples/memc_tcpdump001.txt +++ b/t/pt-query-digest/samples/memc_tcpdump001.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x26193ADA9E14A97E at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-04 21:33:39.229179 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump002.txt b/t/pt-query-digest/samples/memc_tcpdump002.txt index 6f4e0216..3b0720c4 100644 --- a/t/pt-query-digest/samples/memc_tcpdump002.txt +++ b/t/pt-query-digest/samples/memc_tcpdump002.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x456F2F160AF2DC0F at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-04 22:12:06.174390 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump003.txt b/t/pt-query-digest/samples/memc_tcpdump003.txt index a32419f9..b0bb5ff4 100644 --- a/t/pt-query-digest/samples/memc_tcpdump003.txt +++ b/t/pt-query-digest/samples/memc_tcpdump003.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAEBF67014CC9A7C0 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-04 22:12:06.175734 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -28,7 +27,6 @@ incr key # Query 2: 0 QPS, 0x concurrency, ID 0xC03129972E1D6A1F at byte 522 ______ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-04 22:12:06.176181 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt b/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt index c0fea77a..df88fe79 100644 --- a/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt +++ b/t/pt-query-digest/samples/memc_tcpdump003_report_key_print.txt @@ -6,7 +6,6 @@ # Item 1: 4.47k QPS, 0.32x concurrency, ID 0x8228B9A98CA1531D at byte 0 __ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-04 22:12:06.175734 to 22:12:06.176181 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump004.txt b/t/pt-query-digest/samples/memc_tcpdump004.txt index cc9d2cfd..b7d8f2aa 100644 --- a/t/pt-query-digest/samples/memc_tcpdump004.txt +++ b/t/pt-query-digest/samples/memc_tcpdump004.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAEBF67014CC9A7C0 at byte 764 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-06 10:37:21.668469 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -29,7 +28,6 @@ incr key # Query 2: 0 QPS, 0x concurrency, ID 0xC03129972E1D6A1F at byte 1788 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-06 10:37:21.668851 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump005.txt b/t/pt-query-digest/samples/memc_tcpdump005.txt index 3bbceb2f..838774b6 100644 --- a/t/pt-query-digest/samples/memc_tcpdump005.txt +++ b/t/pt-query-digest/samples/memc_tcpdump005.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x26193ADA9E14A97E at byte 764 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-06 22:07:14.406827 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump006.txt b/t/pt-query-digest/samples/memc_tcpdump006.txt index 2d70b633..04149aa8 100644 --- a/t/pt-query-digest/samples/memc_tcpdump006.txt +++ b/t/pt-query-digest/samples/memc_tcpdump006.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x456F2F160AF2DC0F at byte 0 ________ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-06 22:07:14.411331 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump007.txt b/t/pt-query-digest/samples/memc_tcpdump007.txt index 50484355..62e20baf 100644 --- a/t/pt-query-digest/samples/memc_tcpdump007.txt +++ b/t/pt-query-digest/samples/memc_tcpdump007.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x28C64E8A71EEAEAF at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-06-11 21:54:49.059144 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump008.txt b/t/pt-query-digest/samples/memc_tcpdump008.txt index 0dd87d4a..931f13dd 100644 --- a/t/pt-query-digest/samples/memc_tcpdump008.txt +++ b/t/pt-query-digest/samples/memc_tcpdump008.txt @@ -2,7 +2,6 @@ # Query 1: 645.28k QPS, 1.29x concurrency, ID 0x456F2F160AF2DC0F at byte 0 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: |^ | # Time range: 2009-07-06 22:07:14.411331 to 22:07:14.411334 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump009.txt b/t/pt-query-digest/samples/memc_tcpdump009.txt index 3fd672fb..47340315 100644 --- a/t/pt-query-digest/samples/memc_tcpdump009.txt +++ b/t/pt-query-digest/samples/memc_tcpdump009.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x6A3331FD94A66F54 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-06-11 21:54:52.244534 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/memc_tcpdump010.txt b/t/pt-query-digest/samples/memc_tcpdump010.txt index a6825f4c..ae021d35 100644 --- a/t/pt-query-digest/samples/memc_tcpdump010.txt +++ b/t/pt-query-digest/samples/memc_tcpdump010.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x3D1AED9A2A3A73C8 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-09 22:00:29.066476 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/rawlog001.txt b/t/pt-query-digest/samples/rawlog001.txt index b5722bb6..8227959f 100644 --- a/t/pt-query-digest/samples/rawlog001.txt +++ b/t/pt-query-digest/samples/rawlog001.txt @@ -8,7 +8,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xCB5621E548E5497F at byte 0 ________ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 1 @@ -32,7 +31,6 @@ SELECT c FROM t WHERE id=1\G # Query 2: 0 QPS, 0x concurrency, ID 0x774B2B0B59EBAC2C at byte 27 _______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 1 diff --git a/t/pt-query-digest/samples/slow001_distillreport.txt b/t/pt-query-digest/samples/slow001_distillreport.txt index def4a6ea..757c6152 100644 --- a/t/pt-query-digest/samples/slow001_distillreport.txt +++ b/t/pt-query-digest/samples/slow001_distillreport.txt @@ -6,7 +6,6 @@ # Item 1: 0 QPS, 0x concurrency, ID 0x82E67ABEEDCA3249 at byte 0 _________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,7 +33,6 @@ SELECT n # Item 2: 0 QPS, 0x concurrency, ID 0x7AD070CD3F4121D5 at byte 359 _______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:45:10 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow001_report.txt b/t/pt-query-digest/samples/slow001_report.txt index 95077c7c..d85429cb 100644 --- a/t/pt-query-digest/samples/slow001_report.txt +++ b/t/pt-query-digest/samples/slow001_report.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 0 ________ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,7 +31,6 @@ select sleep(2) from n\G # Query 2: 0 QPS, 0x concurrency, ID 0x3A99CC42AEDCCFCD at byte 359 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:45:10 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow001_select_report.txt b/t/pt-query-digest/samples/slow001_select_report.txt index 412fd390..27d29433 100644 --- a/t/pt-query-digest/samples/slow001_select_report.txt +++ b/t/pt-query-digest/samples/slow001_select_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 1 @@ -25,7 +24,6 @@ select sleep(2) from n\G # Query 2: 0 QPS, 0x concurrency, ID 0x3A99CC42AEDCCFCD at byte 359 ______ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 1 diff --git a/t/pt-query-digest/samples/slow001_tablesreport.txt b/t/pt-query-digest/samples/slow001_tablesreport.txt index 2de5f139..5dc70bf3 100644 --- a/t/pt-query-digest/samples/slow001_tablesreport.txt +++ b/t/pt-query-digest/samples/slow001_tablesreport.txt @@ -6,7 +6,6 @@ # Item 1: 0.03 QPS, 0.05x concurrency, ID 0x1161D7068EB79526 at byte 0 ___ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-10-15 21:43:52 to 21:45:10 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow002-orderbynonexistent.txt b/t/pt-query-digest/samples/slow002-orderbynonexistent.txt index f81d43ff..1bad0e83 100644 --- a/t/pt-query-digest/samples/slow002-orderbynonexistent.txt +++ b/t/pt-query-digest/samples/slow002-orderbynonexistent.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -44,7 +43,6 @@ select n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 # Query 2: 0 QPS, 0x concurrency, ID 0x0FFE94ABA6A2A9E8 at byte 1334 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -87,7 +85,6 @@ select vab3concept1id = '91848182522' from db4.vab3concept1upload where va # Query 3: 0 QPS, 0x concurrency, ID 0xB211BA2B8D6D065C at byte 2393 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -129,7 +126,6 @@ select biz = '91848182522' from foo.bar \G # Query 4: 0 QPS, 0x concurrency, ID 0x6969975466519B81 at byte 2861 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -172,7 +168,6 @@ select boop='bop: 899' from bizzle.bat where fillze='899'\G # Query 5: 0 QPS, 0x concurrency, ID 0xC22D235B07D1D774 at byte 1864 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -211,7 +206,6 @@ VALUES ('211', '18')\G # Query 6: 0 QPS, 0x concurrency, ID 0x7546F89214254F2F at byte 815 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -250,7 +244,6 @@ VALUES ('', 'Exact')\G # Query 7: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 0 ________ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # 
Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow002_iters_2.txt b/t/pt-query-digest/samples/slow002_iters_2.txt index 178b2ea8..e0ee2280 100644 --- a/t/pt-query-digest/samples/slow002_iters_2.txt +++ b/t/pt-query-digest/samples/slow002_iters_2.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow002_orderbyreport.txt b/t/pt-query-digest/samples/slow002_orderbyreport.txt index 3b09e861..5eaf9e0b 100644 --- a/t/pt-query-digest/samples/slow002_orderbyreport.txt +++ b/t/pt-query-digest/samples/slow002_orderbyreport.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xB211BA2B8D6D065C at byte 3374 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -45,7 +44,6 @@ select biz = '91848182522' from foo.bar \G # Query 2: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow002_report.txt b/t/pt-query-digest/samples/slow002_report.txt index f383847f..50d6b4b3 100644 --- a/t/pt-query-digest/samples/slow002_report.txt +++ b/t/pt-query-digest/samples/slow002_report.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x66825DDC008FFA89 at byte 338 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -43,7 +42,6 @@ select n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 # Query 2: 0 QPS, 0x concurrency, ID 0x0FFE94ABA6A2A9E8 at byte 1334 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -86,7 +84,6 @@ select vab3concept1id = '91848182522' from db4.vab3concept1upload where va # Query 3: 0 QPS, 0x concurrency, ID 0xB211BA2B8D6D065C at byte 3374 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -128,7 +125,6 @@ select biz = '91848182522' from foo.bar \G # Query 4: 0 QPS, 0x concurrency, ID 0x6969975466519B81 at byte 2861 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute 
pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -171,7 +167,6 @@ select boop='bop: 899' from bizzle.bat where fillze='899'\G # Query 5: 0 QPS, 0x concurrency, ID 0xC22D235B07D1D774 at byte 1864 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -210,7 +205,6 @@ VALUES ('211', '18')\G # Query 6: 0 QPS, 0x concurrency, ID 0x7546F89214254F2F at byte 815 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -249,7 +243,6 @@ VALUES ('', 'Exact')\G # Query 7: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 0 ________ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow002_report_filtered.txt b/t/pt-query-digest/samples/slow002_report_filtered.txt index 10056cbf..294fdd7f 100644 --- a/t/pt-query-digest/samples/slow002_report_filtered.txt +++ b/t/pt-query-digest/samples/slow002_report_filtered.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x6969975466519B81 at byte 2861 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow003_report.txt b/t/pt-query-digest/samples/slow003_report.txt index faeee997..01bb734f 100644 --- a/t/pt-query-digest/samples/slow003_report.txt +++ b/t/pt-query-digest/samples/slow003_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x85FFF5AA78E5FF6A at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow004_report.txt b/t/pt-query-digest/samples/slow004_report.txt index 7f4b2496..1d44affc 100644 --- a/t/pt-query-digest/samples/slow004_report.txt +++ b/t/pt-query-digest/samples/slow004_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xB16C9E5B3D9C484F at byte 0 ________ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006-order-by-re.txt b/t/pt-query-digest/samples/slow006-order-by-re.txt index 51ffe8b9..80e01a69 100644 --- a/t/pt-query-digest/samples/slow006-order-by-re.txt +++ b/t/pt-query-digest/samples/slow006-order-by-re.txt @@ -2,7 +2,6 @@ # Query 1: 0.05 QPS, 0x concurrency, ID 0xA20C29AF174CE545 at byte 1833 __ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -35,7 +34,6 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0x concurrency, ID 0xD4CD74934382A184 at byte 1469 __ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_AR_1.txt b/t/pt-query-digest/samples/slow006_AR_1.txt index adb32dd6..481c2715 100644 --- a/t/pt-query-digest/samples/slow006_AR_1.txt +++ b/t/pt-query-digest/samples/slow006_AR_1.txt @@ -2,7 +2,6 @@ # Query 1: 0.05 QPS, 0.00x concurrency, ID 0xA20C29AF174CE545 at byte 1833 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -41,7 +40,6 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_AR_2.txt b/t/pt-query-digest/samples/slow006_AR_2.txt index eab578a5..dac2e8ba 100644 --- a/t/pt-query-digest/samples/slow006_AR_2.txt +++ b/t/pt-query-digest/samples/slow006_AR_2.txt @@ -3,7 +3,6 @@ # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_AR_4.txt b/t/pt-query-digest/samples/slow006_AR_4.txt index aa7882c6..fa2361c4 100644 --- a/t/pt-query-digest/samples/slow006_AR_4.txt +++ b/t/pt-query-digest/samples/slow006_AR_4.txt @@ -2,7 +2,6 @@ # Query 1: 0.05 QPS, 0.00x concurrency, ID 0xA20C29AF174CE545 at byte 1833 # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -41,7 +40,6 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_AR_5.txt b/t/pt-query-digest/samples/slow006_AR_5.txt index 3d6b8ac2..cef99266 100644 --- a/t/pt-query-digest/samples/slow006_AR_5.txt +++ b/t/pt-query-digest/samples/slow006_AR_5.txt @@ -3,7 +3,6 @@ # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow006_report.txt b/t/pt-query-digest/samples/slow006_report.txt index 166ac17c..4f5fa94c 100644 --- a/t/pt-query-digest/samples/slow006_report.txt +++ b/t/pt-query-digest/samples/slow006_report.txt @@ -2,7 +2,6 @@ # Query 1: 0.05 QPS, 0.00x concurrency, ID 0xA20C29AF174CE545 at byte 1833 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:49:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -35,7 +34,6 @@ SELECT col FROM foo_tbl\G # Query 2: 0.30 QPS, 0.00x concurrency, ID 0xD4CD74934382A184 at byte 1469 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:57 to 11:49:07 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_1-55.txt b/t/pt-query-digest/samples/slow007_explain_1-55.txt index b17c8065..a74ee132 100644 --- a/t/pt-query-digest/samples/slow007_explain_1-55.txt +++ b/t/pt-query-digest/samples/slow007_explain_1-55.txt @@ -2,8 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# EXPLAIN sparkline: I -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_2-51.txt b/t/pt-query-digest/samples/slow007_explain_2-51.txt index 61fd31c7..511c5924 100644 --- a/t/pt-query-digest/samples/slow007_explain_2-51.txt +++ b/t/pt-query-digest/samples/slow007_explain_2-51.txt @@ -2,8 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# EXPLAIN sparkline: I -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_3.txt b/t/pt-query-digest/samples/slow007_explain_3.txt index 534706ae..91235c91 100644 --- a/t/pt-query-digest/samples/slow007_explain_3.txt +++ b/t/pt-query-digest/samples/slow007_explain_3.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,6 +33,6 @@ SELECT fruit FROM trees\G # EXPLAIN failed: DBD::mysql::st execute failed: Table 'food.trees' doesn't exist [for Statement "EXPLAIN /*!50100 PARTITIONS */ SELECT fruit FROM trees"] at line ?. # Profile -# Rank Query ID Response time Calls R/Call V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ===== ========== ============ -# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 0.00 SELECT trees +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ============ +# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 0.00 SELECT trees diff --git a/t/pt-query-digest/samples/slow007_explain_4.txt b/t/pt-query-digest/samples/slow007_explain_4.txt index f7fff1f3..42013836 100644 --- a/t/pt-query-digest/samples/slow007_explain_4.txt +++ b/t/pt-query-digest/samples/slow007_explain_4.txt @@ -1,5 +1,5 @@ # Profile -# Rank Query ID Response time Calls R/Call V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ===== ======= ============ -# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 0.00 I SELECT trees +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ============ +# 1 0x8E306CDB7A800841 0.0000 100.0% 1 0.0000 0.00 SELECT trees diff --git a/t/pt-query-digest/samples/slow008_report.txt b/t/pt-query-digest/samples/slow008_report.txt index 9cc5ccf3..f4bb7458 100644 --- a/t/pt-query-digest/samples/slow008_report.txt +++ b/t/pt-query-digest/samples/slow008_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xC72BF45D68E35A6E at byte 435 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 @@ -33,7 +32,6 @@ SELECT MIN(id),MAX(id) FROM tbl\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 221 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 @@ -60,7 +58,6 @@ SET NAMES utf8\G # Query 3: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 0 ________ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: |^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 diff --git a/t/pt-query-digest/samples/slow010_reportbyfile.txt b/t/pt-query-digest/samples/slow010_reportbyfile.txt index 07278f86..de0b5fa3 100644 --- a/t/pt-query-digest/samples/slow010_reportbyfile.txt +++ b/t/pt-query-digest/samples/slow010_reportbyfile.txt @@ -6,7 +6,6 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xE0976A52E15A18AC at byte 0 _________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow011_report.txt b/t/pt-query-digest/samples/slow011_report.txt index 75b8cad7..d624a280 100644 --- a/t/pt-query-digest/samples/slow011_report.txt +++ b/t/pt-query-digest/samples/slow011_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 435 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.02 -# Query_time sparkline: |^ ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 2 @@ -29,7 +28,6 @@ administrator command: Quit\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 663 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 2 diff --git a/t/pt-query-digest/samples/slow013_report.txt b/t/pt-query-digest/samples/slow013_report.txt index dc13096b..3e8d479b 100644 --- a/t/pt-query-digest/samples/slow013_report.txt +++ b/t/pt-query-digest/samples/slow013_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x31DA25F95494CA95 at byte 174 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:20 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -30,7 +29,6 @@ SHOW STATUS\G # Query 2: 0 QPS, 0x concurrency, ID 0x3AEAAD0E15D725B5 at byte 600 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -58,7 +56,6 @@ SET autocommit=0\G # Query 3: 0 QPS, 0x concurrency, ID 0x813031B8BBC3B329 at byte 782 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -86,7 +83,6 @@ commit\G # Query 4: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 385 ______ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: |^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt b/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt index 943f468c..9ba33aec 100644 --- a/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt +++ b/t/pt-query-digest/samples/slow013_report_fingerprint_user.txt @@ -16,7 +16,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x31DA25F95494CA95 at byte 174 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:20 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -48,7 +47,6 @@ SHOW STATUS\G # Item 1: 2 QPS, 0.15x concurrency, ID 0x4F1658C9B243995F at byte 174 ____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.15 -# Query_time sparkline: |^ ^ | # Time range: 2008-11-27 08:51:20 to 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow013_report_limit.txt b/t/pt-query-digest/samples/slow013_report_limit.txt index d08830a8..d465694e 100644 --- a/t/pt-query-digest/samples/slow013_report_limit.txt +++ b/t/pt-query-digest/samples/slow013_report_limit.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x31DA25F95494CA95 at byte 174 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:20 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow013_report_outliers.txt b/t/pt-query-digest/samples/slow013_report_outliers.txt index 2e7cfa9b..6b60ddc0 100644 --- a/t/pt-query-digest/samples/slow013_report_outliers.txt +++ b/t/pt-query-digest/samples/slow013_report_outliers.txt @@ -6,7 +6,6 @@ # Item 1: 2 QPS, 0.15x concurrency, ID 0x4F1658C9B243995F at byte 174 ____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.15 -# Query_time sparkline: |^ ^ | # Time range: 2008-11-27 08:51:20 to 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -33,7 +32,6 @@ mytopuser # Item 2: 0 QPS, 0x concurrency, ID 0x8F4C76E92F07EABE at byte 600 _______ # This item is included in the report because it matches --outliers. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow013_report_user.txt b/t/pt-query-digest/samples/slow013_report_user.txt index 0a350d53..1428d2c5 100644 --- a/t/pt-query-digest/samples/slow013_report_user.txt +++ b/t/pt-query-digest/samples/slow013_report_user.txt @@ -6,7 +6,6 @@ # Item 1: 2 QPS, 0.15x concurrency, ID 0x4F1658C9B243995F at byte 174 ____ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.15 -# Query_time sparkline: |^ ^ | # Time range: 2008-11-27 08:51:20 to 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -33,7 +32,6 @@ mytopuser # Item 2: 0 QPS, 0x concurrency, ID 0x8F4C76E92F07EABE at byte 600 _______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2008-11-27 08:51:21 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow014_report.txt b/t/pt-query-digest/samples/slow014_report.txt index 0cdadc56..dae63fbd 100644 --- a/t/pt-query-digest/samples/slow014_report.txt +++ b/t/pt-query-digest/samples/slow014_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 1313 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow018_report.txt b/t/pt-query-digest/samples/slow018_report.txt index a3642b3b..a24e59af 100644 --- a/t/pt-query-digest/samples/slow018_report.txt +++ b/t/pt-query-digest/samples/slow018_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x6083030C4A5D8996 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow019_report.txt b/t/pt-query-digest/samples/slow019_report.txt index 0d04dbdb..2ae61093 100644 --- a/t/pt-query-digest/samples/slow019_report.txt +++ b/t/pt-query-digest/samples/slow019_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 435 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.02 -# Query_time sparkline: |^ ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 66 2 @@ -29,7 +28,6 @@ administrator command: Quit\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 221 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 diff --git a/t/pt-query-digest/samples/slow019_report_noza.txt b/t/pt-query-digest/samples/slow019_report_noza.txt index 0e01d7e0..fb1683d8 100644 --- a/t/pt-query-digest/samples/slow019_report_noza.txt +++ b/t/pt-query-digest/samples/slow019_report_noza.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 435 ______ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.02 -# Query_time sparkline: |^ ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 66 2 @@ -29,7 +28,6 @@ administrator command: Quit\G # Query 2: 0 QPS, 0x concurrency, ID 0xCC47B42511EA22DD at byte 221 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 33 1 diff --git a/t/pt-query-digest/samples/slow023.txt b/t/pt-query-digest/samples/slow023.txt index 4760f68f..ae3db272 100644 --- a/t/pt-query-digest/samples/slow023.txt +++ b/t/pt-query-digest/samples/slow023.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E38374648788E52 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow024.txt b/t/pt-query-digest/samples/slow024.txt index 0dd9e86c..4bc6fe2c 100644 --- a/t/pt-query-digest/samples/slow024.txt +++ b/t/pt-query-digest/samples/slow024.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x93E5C17055D970BE at byte 514419 ___ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -32,7 +31,6 @@ INSERT INTO `film_actor` VALUES (1,1,'2006-02-15 10:05:03') /*... omitted ...*/O # Query 2: 0 QPS, 0x concurrency, ID 0xA1C3EE4F5996E672 at byte 342942 ___ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -62,7 +60,6 @@ INSERT IGNORE INTO `film_actor` VALUES (1,1,'2006-02-15 10:05:03') /*... omitted # Query 3: 0 QPS, 0x concurrency, ID 0xA2C576176F348267 at byte 171471 ___ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow028.txt b/t/pt-query-digest/samples/slow028.txt index 99ea2ce2..d0f27025 100644 --- a/t/pt-query-digest/samples/slow028.txt +++ b/t/pt-query-digest/samples/slow028.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x182FF6A853858893 at byte 0 ________ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-10-15 21:43:52 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow032.txt b/t/pt-query-digest/samples/slow032.txt index d3000c2a..18c3f306 100644 --- a/t/pt-query-digest/samples/slow032.txt +++ b/t/pt-query-digest/samples/slow032.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7546F89214254F2F at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 1 diff --git a/t/pt-query-digest/samples/slow033-precise-since-until.txt b/t/pt-query-digest/samples/slow033-precise-since-until.txt index 85e224d7..f036f4cf 100644 --- a/t/pt-query-digest/samples/slow033-precise-since-until.txt +++ b/t/pt-query-digest/samples/slow033-precise-since-until.txt @@ -2,7 +2,6 @@ # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,7 +33,6 @@ SELECT * FROM bar\G # Query 2: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-rtm-event-1h.txt b/t/pt-query-digest/samples/slow033-rtm-event-1h.txt index 8ea873cc..473568ef 100644 --- a/t/pt-query-digest/samples/slow033-rtm-event-1h.txt +++ b/t/pt-query-digest/samples/slow033-rtm-event-1h.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-rtm-event-25h.txt b/t/pt-query-digest/samples/slow033-rtm-event-25h.txt index 51e164f2..7b70d373 100644 --- a/t/pt-query-digest/samples/slow033-rtm-event-25h.txt +++ b/t/pt-query-digest/samples/slow033-rtm-event-25h.txt @@ -2,7 +2,6 @@ # Query 1: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-25 11:19:27 to 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt b/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt index 918f7d0e..076011a9 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-1d.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -39,7 +38,6 @@ SELECT * FROM foo\G # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -76,7 +74,6 @@ SELECT * FROM foo\G # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -108,7 +105,6 @@ SELECT * FROM bar\G # Query 2: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 683 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:30:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -146,7 +142,6 @@ SELECT * FROM foo\G # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt b/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt index 6b5f5ca5..5bab12fe 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-30m.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -39,7 +38,6 @@ SELECT * FROM foo\G # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -76,7 +74,6 @@ SELECT * FROM foo\G # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -113,7 +110,6 @@ SELECT * FROM bar\G # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 683 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:30:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -150,7 +146,6 @@ SELECT * FROM foo\G # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt b/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt index d883cd9a..c4d782bb 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-30s-3iter.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -39,7 +38,6 @@ SELECT * FROM foo\G # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -76,7 +74,6 @@ SELECT * FROM foo\G # Query 1: 0 QPS, 0x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 344 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:19:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt b/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt index d05a1b4a..24839a2d 100644 --- a/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt +++ b/t/pt-query-digest/samples/slow033-rtm-interval-30s.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 0 ________ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-25 11:19:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -39,7 +38,6 @@ SELECT * FROM foo\G # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -76,7 +74,6 @@ SELECT * FROM foo\G # Query 1: 0 QPS, 0x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 344 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:19:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -113,7 +110,6 @@ SELECT * FROM bar\G # Query 1: 0 QPS, 0x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -150,7 +146,6 @@ SELECT * FROM bar\G # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 683 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-27 11:30:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -187,7 +182,6 @@ SELECT * FROM foo\G # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-since-Nd.txt b/t/pt-query-digest/samples/slow033-since-Nd.txt index bbe54d68..024db278 100644 --- a/t/pt-query-digest/samples/slow033-since-Nd.txt +++ b/t/pt-query-digest/samples/slow033-since-Nd.txt @@ -2,7 +2,6 @@ # Query 1: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-25 11:19:27 to 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,7 +33,6 @@ SELECT * FROM foo\G # Query 2: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-since-yymmdd.txt b/t/pt-query-digest/samples/slow033-since-yymmdd.txt index 17b22589..21f1cc67 100644 --- a/t/pt-query-digest/samples/slow033-since-yymmdd.txt +++ b/t/pt-query-digest/samples/slow033-since-yymmdd.txt @@ -2,7 +2,6 @@ # Query 1: 2 QPS, 0.00x concurrency, ID 0x07AEF8EFAB3FA3CE at byte 509 ___ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-27 11:19:30 to 11:19:31 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -34,7 +33,6 @@ SELECT * FROM bar\G # Query 2: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-27 11:30:00 to 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt b/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt index 73ce9512..4e0d2d2a 100644 --- a/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt +++ b/t/pt-query-digest/samples/slow033-since-yyyy-mm-dd.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAC1BF726F2AB10C5 at byte 861 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-07-28 18:00:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow033-until-date.txt b/t/pt-query-digest/samples/slow033-until-date.txt index 12f34ffa..26373f79 100644 --- a/t/pt-query-digest/samples/slow033-until-date.txt +++ b/t/pt-query-digest/samples/slow033-until-date.txt @@ -2,7 +2,6 @@ # Query 1: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 179 # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-07-25 11:19:27 to 2009-07-26 11:19:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt index 2a8495c8..d861d638 100644 --- a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt +++ b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum-with-Locktime-distro.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xABE9508269335CD1 at byte 1866 _____ # Scores: V/M = 0.00 -# Lock_time sparkline: | ^| # Time range: all events occurred at 2009-08-05 13:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -39,7 +38,6 @@ select * from forest WHERE animal = 'dead'\G # Query 2: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 934 # Scores: V/M = 0.03 -# Lock_time sparkline: | _^ | # Time range: 2009-08-05 11:00:27 to 13:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -72,7 +70,6 @@ SELECT * FROM foo\G # Query 3: 0 QPS, 0x concurrency, ID 0xB79802214165F670 at byte 1267 _____ # Scores: V/M = 0.73 -# Lock_time sparkline: | ^^ | # Time range: all events occurred at 2009-08-05 12:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -103,7 +100,6 @@ INSERT INTO tbl VALUES ('a', 'b')\G # Query 4: 0 QPS, 0x concurrency, ID 0x1F9B2F47A843D460 at byte 333 ______ # Scores: V/M = 0.00 -# Lock_time sparkline: | ^ | # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -133,7 +129,6 @@ SELECT id FROM tbl WHERE id = 1\G # Query 5: 0 QPS, 0x concurrency, ID 0x3F1024B96D9D469E at byte 625 ______ # Scores: V/M = 0.00 -# Lock_time sparkline: |^ | # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt index be7bdd41..56feb2ef 100644 --- a/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt +++ b/t/pt-query-digest/samples/slow034-order-by-Locktime-sum.txt @@ -1,7 +1,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xABE9508269335CD1 at byte 1866 _____ # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: all events occurred at 2009-08-05 13:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -39,7 +38,6 @@ select * from forest WHERE animal = 'dead'\G # Query 2: 0.00 QPS, 0.00x concurrency, ID 0xAC1BF726F2AB10C5 at byte 934 # Scores: V/M = 0.03 -# Query_time sparkline: | ^ | # Time range: 2009-08-05 11:00:27 to 13:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -72,7 +70,6 @@ SELECT * FROM foo\G # Query 3: 0 QPS, 0x concurrency, ID 0xB79802214165F670 at byte 1267 _____ # Scores: V/M = 0.73 -# Query_time sparkline: | ^ ^ | # Time range: 
all events occurred at 2009-08-05 12:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -103,7 +100,6 @@ INSERT INTO tbl VALUES ('a', 'b')\G # Query 4: 0 QPS, 0x concurrency, ID 0x1F9B2F47A843D460 at byte 333 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -133,7 +129,6 @@ SELECT id FROM tbl WHERE id = 1\G # Query 5: 0 QPS, 0x concurrency, ID 0x3F1024B96D9D469E at byte 625 ______ # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-08-05 11:00:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow035.txt b/t/pt-query-digest/samples/slow035.txt index a554cc12..d65d9a6a 100644 --- a/t/pt-query-digest/samples/slow035.txt +++ b/t/pt-query-digest/samples/slow035.txt @@ -20,7 +20,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x727841EC88423713 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -58,7 +57,6 @@ INSERT INTO db.v (m, b) VALUES ('', 'Exact')\G # Query 2: 0 QPS, 0x concurrency, ID 0x9E892D4B16D7BFC2 at byte 525 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow037_report.txt b/t/pt-query-digest/samples/slow037_report.txt index 7b9de8ef..ee1fe919 100644 --- a/t/pt-query-digest/samples/slow037_report.txt +++ b/t/pt-query-digest/samples/slow037_report.txt @@ -6,7 +6,6 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xABCC9DEC8C43EEDC at byte 0 _________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow042-show-all-host.txt b/t/pt-query-digest/samples/slow042-show-all-host.txt index e43cfb4a..fe9f56dc 100644 --- a/t/pt-query-digest/samples/slow042-show-all-host.txt +++ b/t/pt-query-digest/samples/slow042-show-all-host.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7CE9953EA3A36141 at byte 417 ______ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-05 19:55:11 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow042.txt b/t/pt-query-digest/samples/slow042.txt index 78cc99cb..bee41e47 100644 --- a/t/pt-query-digest/samples/slow042.txt +++ b/t/pt-query-digest/samples/slow042.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x7CE9953EA3A36141 at byte 417 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-05 19:55:11 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow048.txt b/t/pt-query-digest/samples/slow048.txt index 9232d771..efa2cae1 100644 --- a/t/pt-query-digest/samples/slow048.txt +++ b/t/pt-query-digest/samples/slow048.txt @@ -2,7 +2,6 @@ # Query 1: 1.33 QPS, 0.00x concurrency, ID 0x208AC308FD716D83 at byte 454 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2010-06-24 11:48:27 to 11:48:30 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow049.txt b/t/pt-query-digest/samples/slow049.txt index c9649a8e..290045e5 100644 --- a/t/pt-query-digest/samples/slow049.txt +++ b/t/pt-query-digest/samples/slow049.txt @@ -20,7 +20,6 @@ # Query 1: 2 QPS, 1.00kx concurrency, ID 0x95AADD230F4EB56A at byte 886 __ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: 2010-06-24 11:48:34 to 11:48:35 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -48,7 +47,6 @@ SELECT two FROM two WHERE id=?\G # Query 2: 0 QPS, 0x concurrency, ID 0x5081E1858C60FD05 at byte 1013 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: all events occurred at 2010-06-24 11:48:35 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -76,7 +74,6 @@ SELECT three FROM three WHERE id=?\G # Query 4: 1.25 QPS, 12.50x concurrency, ID 0x70E215C4BFED0080 at byte 633 # This item is included in the report because it matches --outliers. # Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: 2010-06-24 11:48:21 to 11:48:25 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow050.txt b/t/pt-query-digest/samples/slow050.txt index a1c87e49..594a7458 100644 --- a/t/pt-query-digest/samples/slow050.txt +++ b/t/pt-query-digest/samples/slow050.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x305E73C51188758F at byte 0 ________ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^| # Time range: all events occurred at 2010-06-24 11:48:00 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow051.txt b/t/pt-query-digest/samples/slow051.txt index 11a68c8f..d96d69c2 100644 --- a/t/pt-query-digest/samples/slow051.txt +++ b/t/pt-query-digest/samples/slow051.txt @@ -2,7 +2,6 @@ # Query 1: 0.20 QPS, 0.00x concurrency, ID 0xD989521B246E945B at byte 146 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2007-12-18 11:48:27 to 11:48:37 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow052.txt b/t/pt-query-digest/samples/slow052.txt index a67e5793..92214103 100644 --- a/t/pt-query-digest/samples/slow052.txt +++ b/t/pt-query-digest/samples/slow052.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x32B0659E6D13E5A2 at byte 16849 ____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.48 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 100 @@ -32,7 +31,6 @@ select very_variable_column from unsteady_table\G # Query 2: 0 QPS, 0x concurrency, ID 0x2F621C2B0611518C at byte 8582 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 50 100 diff --git a/t/pt-query-digest/samples/slow053.txt b/t/pt-query-digest/samples/slow053.txt index 9a115bc1..51fec151 100644 --- a/t/pt-query-digest/samples/slow053.txt +++ b/t/pt-query-digest/samples/slow053.txt @@ -2,7 +2,6 @@ # Query 1: 2 QPS, 1.90x concurrency, ID 0xA4EAD36B5CEB1C13 at byte 1044 __ # This item is included in the report because it matches --limit. # Scores: V/M = 0.01 -# Query_time sparkline: | ^^ | # Time range: 2011-02-08 12:00:09 to 12:00:10 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -33,7 +32,6 @@ SELECT * FROM blah WHERE id IS NOT NULL\G # Query 2: 1.50 QPS, 0.03x concurrency, ID 0xAC0EC652760FEEB3 at byte 913 # This item is included in the report because it matches --limit. # Scores: V/M = 0.03 -# Query_time sparkline: | ^ _ | # Time range: 2011-02-08 12:00:06 to 12:00:08 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -64,7 +62,6 @@ SELECT * FROM bar WHERE id=12\G # Query 3: 1.25 QPS, 0.00x concurrency, ID 0xBB11C6B7F3BAAB30 at byte 521 # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2011-02-08 12:00:01 to 12:00:05 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow054.txt b/t/pt-query-digest/samples/slow054.txt index 0c56aafe..ed380133 100644 --- a/t/pt-query-digest/samples/slow054.txt +++ b/t/pt-query-digest/samples/slow054.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xBB11C6B7F3BAAB30 at byte 1058 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2011-02-08 12:00:01 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow055.txt b/t/pt-query-digest/samples/slow055.txt index b6a8779b..c834e436 100644 --- a/t/pt-query-digest/samples/slow055.txt +++ b/t/pt-query-digest/samples/slow055.txt @@ -6,7 +6,6 @@ # Item 1: 0 QPS, 0x concurrency, ID 0xE9800998ECF8427E at byte 420 _______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.01 -# Query_time sparkline: |^ ^ ^ | # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= # Count 100 3 diff --git a/t/pt-query-digest/samples/slow056.txt b/t/pt-query-digest/samples/slow056.txt index 1643ae9b..a265aa1f 100644 --- a/t/pt-query-digest/samples/slow056.txt +++ b/t/pt-query-digest/samples/slow056.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x54E0BB9E70EAA792 at byte 596 ______ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2012-11-23 19:56:06 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -39,7 +38,6 @@ select b = b + 30 from t where user_id=1\G # Query 2: 0 QPS, 0x concurrency, ID 0xE9800998ECF8427E at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2012-11-23 19:56:06 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump001.txt b/t/pt-query-digest/samples/tcpdump001.txt index 856c2dd6..ba6bb94a 100644 --- a/t/pt-query-digest/samples/tcpdump001.txt +++ b/t/pt-query-digest/samples/tcpdump001.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xA3C9C49321D65C30 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 09:50:16.805123 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump002_report.txt b/t/pt-query-digest/samples/tcpdump002_report.txt index 820740b7..91958ebb 100644 --- a/t/pt-query-digest/samples/tcpdump002_report.txt +++ b/t/pt-query-digest/samples/tcpdump002_report.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 1470 _____ # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 11:00:13.118191 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -30,7 +29,6 @@ administrator command: Connect\G # Query 2: 0 QPS, 0x concurrency, ID 0xE3A3649C5FAC418D at byte 2449 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 11:00:13.118643 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -59,7 +57,6 @@ select @@version_comment limit 1\G # Query 3: 0 QPS, 0x concurrency, ID 0xAE5A83B27932AB98 at byte 3298 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 11:00:13.119079 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -88,7 +85,6 @@ select "paris in the the spring" as trick\G # Query 4: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 4186 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2009-04-12 11:00:13.119487 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump003.txt b/t/pt-query-digest/samples/tcpdump003.txt index 973492a3..2ae2d210 100644 --- a/t/pt-query-digest/samples/tcpdump003.txt +++ b/t/pt-query-digest/samples/tcpdump003.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x5D51E5F01B88B79E at byte 1455 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 12:41:46.357853 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump012.txt b/t/pt-query-digest/samples/tcpdump012.txt index 856c2dd6..ba6bb94a 100644 --- a/t/pt-query-digest/samples/tcpdump012.txt +++ b/t/pt-query-digest/samples/tcpdump012.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xA3C9C49321D65C30 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-04-12 09:50:16.805123 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump017_report.txt b/t/pt-query-digest/samples/tcpdump017_report.txt index bd81c04c..b387b333 100644 --- a/t/pt-query-digest/samples/tcpdump017_report.txt +++ b/t/pt-query-digest/samples/tcpdump017_report.txt @@ -11,7 +11,6 @@ # Query 1: 2.13 QPS, 0.36x concurrency, ID 0xE3A3649C5FAC418D at byte 2548 # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.19 -# Query_time sparkline: | ^ ^ | # Time range: 2009-04-12 11:00:13.118643 to 11:00:14.999999 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump021.txt b/t/pt-query-digest/samples/tcpdump021.txt index 5348e285..82d201e1 100644 --- a/t/pt-query-digest/samples/tcpdump021.txt +++ b/t/pt-query-digest/samples/tcpdump021.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA8E9FA785927259 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637394 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -35,7 +34,6 @@ SELECT i FROM d.t WHERE i=?\G # Query 2: 0 QPS, 0x concurrency, ID 0x3F79759E7FA2F117 at byte 1106 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637892 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -70,7 +68,6 @@ SELECT i FROM d.t WHERE i="3"\G # Query 3: 0 QPS, 0x concurrency, ID 0xAA353644DE4C4CB4 at byte 1850 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: all events occurred at 2009-12-08 09:23:49.638381 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump022.txt b/t/pt-query-digest/samples/tcpdump022.txt index ed0544c3..84857f9d 100644 --- a/t/pt-query-digest/samples/tcpdump022.txt +++ b/t/pt-query-digest/samples/tcpdump022.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xC30A1A850F4E510F at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 13:41:12.811188 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -35,7 +34,6 @@ SELECT i,j FROM d.t2 WHERE i=? AND j=?\G # Query 2: 0 QPS, 0x concurrency, ID 0x26EEAE2EADD904A1 at byte 1330 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 13:41:12.811591 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump023.txt b/t/pt-query-digest/samples/tcpdump023.txt index b5a833e4..66410f70 100644 --- a/t/pt-query-digest/samples/tcpdump023.txt +++ b/t/pt-query-digest/samples/tcpdump023.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E77A2947B4BC375 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:14:55.951863 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -35,7 +34,6 @@ SELECT * FROM d.t3 WHERE v=? OR c=? 
OR f=?\G # Query 2: 0 QPS, 0x concurrency, ID 0xA0B1C345E8654C18 at byte 1540 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:14:55.952344 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump024.txt b/t/pt-query-digest/samples/tcpdump024.txt index 2bb6d368..521289c9 100644 --- a/t/pt-query-digest/samples/tcpdump024.txt +++ b/t/pt-query-digest/samples/tcpdump024.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E77A2947B4BC375 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:33:13.711351 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -35,7 +34,6 @@ SELECT * FROM d.t3 WHERE v=? OR c=? OR f=?\G # Query 2: 0 QPS, 0x concurrency, ID 0xA0B1C345E8654C18 at byte 1540 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:33:13.711642 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump025.txt b/t/pt-query-digest/samples/tcpdump025.txt index 921a5e50..ba38d084 100644 --- a/t/pt-query-digest/samples/tcpdump025.txt +++ b/t/pt-query-digest/samples/tcpdump025.txt @@ -2,7 +2,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x72B6E5BC2632931C at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:44:52.709181 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -35,7 +34,6 @@ SELECT * FROM d.t WHERE 1 LIMIT 1;\G # Query 2: 0 QPS, 0x concurrency, ID 0xDDF5E71E9A66B752 at byte 1014 _____ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 14:44:52.709597 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump033.txt b/t/pt-query-digest/samples/tcpdump033.txt index c91bcb08..795dc84e 100644 --- a/t/pt-query-digest/samples/tcpdump033.txt +++ b/t/pt-query-digest/samples/tcpdump033.txt @@ -13,7 +13,6 @@ # Query 1: 2.03k QPS, 0.28x concurrency, ID 0x6EE88728F6F29C72 at byte 800 # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-12-18 08:44:07.235011 to 08:44:07.238467 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -48,7 +47,6 @@ select * from d.t where name="adam"\G # Query 2: 1.17k QPS, 0.19x concurrency, ID 0xECBCD0412B5E497A at byte 9215 # This item is included in the report because it matches --limit. 
# Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: 2009-12-18 08:44:07.234727 to 08:44:07.238999 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= @@ -81,7 +79,6 @@ select * from d.t where name="daniel"\G # Query 3: 1.70k QPS, 0x concurrency, ID 0x559914DA8A7B7F28 at byte 8202 _ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | | # Time range: 2009-12-18 08:44:07.236509 to 08:44:07.238274 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/tcpdump041.txt b/t/pt-query-digest/samples/tcpdump041.txt index 9dd24427..f07654b3 100644 --- a/t/pt-query-digest/samples/tcpdump041.txt +++ b/t/pt-query-digest/samples/tcpdump041.txt @@ -11,7 +11,6 @@ # Query 1: 0 QPS, 0x concurrency, ID 0xAA8E9FA785927259 at byte 0 ________ # This item is included in the report because it matches --limit. # Scores: V/M = 0.00 -# Query_time sparkline: | ^ | # Time range: all events occurred at 2009-12-08 09:23:49.637394 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= From d0d8c5904272530f98317aebb8b581fa0bdbf1be Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Fri, 11 Jan 2013 14:52:42 -0300 Subject: [PATCH 05/34] pqd: Remove --statistics, --pipeline-profile & --fingerprints, make them part of PTDEBUG --- bin/pt-query-digest | 150 ++++++++---------------------- lib/Pipeline.pm | 2 +- t/pt-query-digest/option_sanity.t | 18 ++++ t/pt-query-digest/statistics.t | 32 ------- 4 files changed, 57 insertions(+), 145 deletions(-) delete mode 100644 t/pt-query-digest/statistics.t diff --git a/bin/pt-query-digest b/bin/pt-query-digest index eebec5d5..8a826b42 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -6597,8 +6597,7 @@ sub query_report { $samp_query = $qr->shorten($samp_query, $o->get('shorten')) if $o->get('shorten'); - $report .= "# Fingerprint\n# $item\n" - if $o->get('fingerprints'); + PTDEBUG && _d("Fingerprint\n# $item\n"); $report .= $self->tables_report(@tables) if $o->get('for-explain'); @@ -11435,7 +11434,7 @@ sub new { } my $self = { - instrument => 0, + instrument => PTDEBUG, continue_on_error => 0, %args, @@ -13190,13 +13189,7 @@ sub main { stats => \%stats, }; - # Enable timings to instrument code for either of these two opts. - # Else, don't instrument to avoid cost of measurement. - my $instrument = $o->get('pipeline-profile'); - PTDEBUG && _d('Instrument:', $instrument); - my $pipeline = new Pipeline( - instrument => $instrument, continue_on_error => $o->get('continue-on-error'), ); @@ -13666,7 +13659,38 @@ sub main { print "\n# No events processed.\n"; } - if ( $o->get('statistics') ) { + if ( PTDEBUG ) { + # Print statistics about internal counters. This option is mostly for + # development and debugging. The statistics report is printed for each + # iteration after all other reports, even if no events are processed or + # C<--no-report> is specified. The statistics report looks like: + + # No events processed. 
+ + # Statistic Count %/Events + # ================================================ ====== ======== + # events_read 142030 100.00 + # events_parsed 50430 35.51 + # events_aggregated 0 0.00 + # ignored_midstream_server_response 18111 12.75 + # no_tcp_data 91600 64.49 + # pipeline_restarted_after_MemcachedProtocolParser 142030 100.00 + # pipeline_restarted_after_TcpdumpParser 1 0.00 + # unknown_client_command 1 0.00 + # unknown_client_data 32318 22.75 + + # The first column is the internal counter name; the second column is counter's + # count; and the third column is the count as a percentage of C. + + # In this case, it shows why no events were processed/aggregated: 100% of events + # were rejected by the C. Of those, 35.51% were data + # packets, but of these 12.75% of ignored mid-stream server response, one was + # an unknown client command, and 22.75% were unknown client data. The other + # 64.49% were TCP control packets (probably most ACKs). + + # Since pt-query-digest is complex, you will probably need someone familiar + # with its code to decipher the statistics report. + if ( keys %stats ) { my $report = new ReportFormatter( line_width => 74, @@ -14236,7 +14260,7 @@ sub print_reports { } # Each groupby - if ( $o->get('pipeline-profile') ) { + if ( PTDEBUG ) { my $report = new ReportFormatter( line_width => 74, ); @@ -14257,7 +14281,7 @@ sub print_reports { # Reset profile for next iteration. $pipeline->reset(); - print "\n" . $report->get_report(); + _d($report->get_report()); } return; @@ -14744,7 +14768,6 @@ that follows. It contains the following columns: Calls The number of times this query was executed R/Call The mean response time per execution V/M The Variance-to-mean ratio of response time - EXPLAIN If --explain was specified, a sparkline; see --explain Item The distilled query A final line whose rank is shown as MISC contains aggregate statistics on the @@ -14858,12 +14881,6 @@ above, and something like the following: See also L<"--report-format">. -=head2 SPARKLINES - -The output also contains sparklines. Sparklines are "data-intense, -design-simple, word-sized graphics" (L).There is a sparkline for L<"--report-histogram"> and for L<"--explain">. -See each of those options for details about interpreting their sparklines. - =head1 QUERY REVIEWS A "query review" is the process of storing all the query fingerprints analyzed. @@ -15205,41 +15222,10 @@ be EXPLAINed. Those are typically "derived table" queries of the form select ... from ( select .... ) der; -The EXPLAIN results are printed in three places: a sparkline in the event -header, a full vertical format in the event report, and a sparkline in the -profile. - -The full format appears at the end of each event report in vertical style +The EXPLAIN results are printed as a full vertical format in the event report, +which appears at the end of each event report in vertical style (C<\G>) just like MySQL prints it. -The sparklines (see L<"SPARKLINES">) are compact representations of the -access type for each table and whether or not "Using temporary" or "Using -filesort" appear in EXPLAIN. The sparklines look like: - - nr>TF - -That sparkline means that there are two tables, the first uses a range (n) -access, the second uses a ref access, and both "Using temporary" (T) and -"Using filesort" (F) appear. The greater-than character just separates table -access codes from T and/or F. 
- -The abbreviated table access codes are: - - a ALL - c const - e eq_ref - f fulltext - i index - m index_merge - n range - o ref_or_null - r ref - s system - u unique_subquery - -A capitalized access code means that "Using index" appears in EXPLAIN for -that table. - =item --filter type: string @@ -15329,11 +15315,6 @@ check both. Since L<"--filter"> allows you to alter C<$event>, you can use it to do other things, like create new attributes. See L<"ATTRIBUTES"> for an example. -=item --fingerprints - -Add query fingerprints to the standard query analysis report. This is mostly -useful for debugging purposes. - =item --[no]for-explain default: yes @@ -15546,10 +15527,6 @@ daemonized instance exits. The program checks for the existence of the PID file when starting; if it exists and the process with the matching PID exists, the program exits. -=item --pipeline-profile - -Print a profile of the pipeline processes. - =item --port short form: -P; type: int @@ -15659,24 +15636,6 @@ like: # 1s ######## # 10s+ -A sparkline (see L<"SPARKLINES">) of the full chart is also printed in the -header for each query event. The sparkline of that full chart is: - - # Query_time sparkline: | .^_ | - -The sparkline itself is the 8 characters between the pipes (C<|>), one character -for each of the 8 buckets (1us, 10us, etc.) Four character codes are used -to represent the approximate relation between each bucket's value: - - _ . - ^ - -The caret C<^> represents peaks (buckets with the most values), and -the underscore C<_> represents lows (buckets with the least or at least -one value). The period C<.> and the hyphen C<-> represent buckets with values -between these two extremes. If a bucket has no values, a space is printed. -So in the example above, the period represents the 10ms bucket, the caret -the 100ms bucket, and the underscore the 1s bucket. - See L<"OUTPUT"> for more information. =item --review @@ -16060,39 +16019,6 @@ short form: -S; type: string Socket file to use for connection. -=item --statistics - -Print statistics about internal counters. This option is mostly for -development and debugging. The statistics report is printed for each -iteration after all other reports, even if no events are processed or -C<--no-report> is specified. The statistics report looks like: - - # No events processed. - - # Statistic Count %/Events - # ================================================ ====== ======== - # events_read 142030 100.00 - # events_parsed 50430 35.51 - # events_aggregated 0 0.00 - # ignored_midstream_server_response 18111 12.75 - # no_tcp_data 91600 64.49 - # pipeline_restarted_after_MemcachedProtocolParser 142030 100.00 - # pipeline_restarted_after_TcpdumpParser 1 0.00 - # unknown_client_command 1 0.00 - # unknown_client_data 32318 22.75 - -The first column is the internal counter name; the second column is counter's -count; and the third column is the count as a percentage of C. - -In this case, it shows why no events were processed/aggregated: 100% of events -were rejected by the C. Of those, 35.51% were data -packets, but of these 12.75% of ignored mid-stream server response, one was -an unknown client command, and 22.75% were unknown client data. The other -64.49% were TCP control packets (probably most ACKs). - -Since pt-query-digest is complex, you will probably need someone familiar -with its code to decipher the statistics report. - =item --table-access Print a table access report. 
diff --git a/lib/Pipeline.pm b/lib/Pipeline.pm index 9a890716..ecea20bf 100644 --- a/lib/Pipeline.pm +++ b/lib/Pipeline.pm @@ -42,7 +42,7 @@ sub new { my $self = { # default values for optional args - instrument => 0, + instrument => PTDEBUG, continue_on_error => 0, # specified arg values override defaults diff --git a/t/pt-query-digest/option_sanity.t b/t/pt-query-digest/option_sanity.t index 5e4cf18a..a7eb4956 100644 --- a/t/pt-query-digest/option_sanity.t +++ b/t/pt-query-digest/option_sanity.t @@ -58,6 +58,24 @@ like $output, qr/\Q--embedded-attributes POSIX syntax [: :] belongs inside character/, "Bug 885382: --embedded-attributes rejects warning patterns early";; + +# We removed --statistics, but they should still print out if we use PTDEBUG. + +$output = qx{PTDEBUG=1 $trunk/bin/pt-query-digest --no-report ${sample}slow002.txt 2>&1}; +my $stats = slurp_file("t/pt-query-digest/samples/stats-slow002.txt"); + +like( + $output, + qr/\Q$stats\E/m, + 'PTDEBUG shows --statistics for slow002.txt', +); + +like( + $output, + qr/Pipeline profile/m, + 'PTDEBUG shows --pipeline-profile' +); + # ############################################################################# # pt-query-digest help output mangled # https://bugs.launchpad.net/percona-toolkit/+bug/831525 diff --git a/t/pt-query-digest/statistics.t b/t/pt-query-digest/statistics.t deleted file mode 100644 index 6609f592..00000000 --- a/t/pt-query-digest/statistics.t +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env perl - -BEGIN { - die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" - unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; - unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; -}; - -use strict; -use warnings FATAL => 'all'; -use English qw(-no_match_vars); -use Test::More tests => 1; - -use PerconaTest; - -require "$trunk/bin/pt-query-digest"; - -my @args = qw(--no-report --statistics); -my $sample = "$trunk/t/lib/samples/slowlogs/"; - -ok( - no_diff( - sub { pt_query_digest::main(@args, $sample.'slow002.txt') }, - "t/pt-query-digest/samples/stats-slow002.txt" - ), - '--statistics for slow002.txt', -); - -# ############################################################################# -# Done. -# ############################################################################# -exit; From a1f1e4ae289ff0f5d1861909d248338a861ce4b2 Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Fri, 11 Jan 2013 14:59:23 -0300 Subject: [PATCH 06/34] pqd: Removed --table-access --- bin/pt-query-digest | 76 ------------------- .../samples/slow020_table_access.txt | 3 - .../samples/slow030_table_access.txt | 2 - t/pt-query-digest/slowlog_analyses.t | 18 ----- 4 files changed, 99 deletions(-) delete mode 100644 t/pt-query-digest/samples/slow020_table_access.txt delete mode 100644 t/pt-query-digest/samples/slow030_table_access.txt diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 8a826b42..be60329e 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -14242,14 +14242,6 @@ sub print_reports { $tls->[$i]->reset_aggregated_data(); } - if ( $o->get('table-access') ) { # --table-access - print_table_access_report( - ea => $eas->[$i], - worst => $worst, - %args, - ); - } - $eas->[$i]->reset_aggregated_data(); # Reset for next iteration. # Print header report only once. 
So remove it from the @@ -14435,54 +14427,6 @@ sub get_worst_queries { return $ea->top_events(%top_spec); } -sub print_table_access_report { - my ( %args ) = @_; - my @required_args = qw(ea worst QueryParser QueryRewriter OptionParser Quoter); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my ($ea, $worst, $qp, $qr, $o, $q) = @args{@required_args}; - - my %seen; - PTDEBUG && _d('Doing table access report'); - - foreach my $worst_info ( @$worst ) { - my $item = $worst_info->[0]; - my $stats = $ea->results->{classes}->{$item}; - my $sample = $ea->results->{samples}->{$item}; - my $samp_query = $sample->{arg} || ''; - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} - : undef; - eval { - QUERY: - foreach my $query ( $qp->split($samp_query) ) { - my $rw = $qp->query_type($query, $qr)->{rw}; - next QUERY unless $rw; - my @tables = $qp->extract_tables( - query => $query, - default_db => $default_db, - Quoter => $args{Quoter}, - ); - next QUERY unless scalar @tables; - DB_TBL: - foreach my $tbl_info ( @tables ) { - my ($db, $tbl) = @$tbl_info; - $db = $db ? "`$db`." : ''; - next DB_TBL if $seen{"$db$tbl"}++; # Unique-ify for issue 337. - print "$rw $db`$tbl`\n"; - } - } - }; - if ( $EVAL_ERROR ) { - PTDEBUG && _d($EVAL_ERROR); - warn "Cannot get table access for query $_"; - } - } - - return; -} - sub update_query_review_tables { my ( %args ) = @_; foreach my $arg ( qw(ea worst QueryReview OptionParser) ) { @@ -16019,26 +15963,6 @@ short form: -S; type: string Socket file to use for connection. -=item --table-access - -Print a table access report. - -The table access report shows which tables are accessed by all the queries -and if the access is a read or write. The report looks like: - - write `baz`.`tbl` - read `baz`.`new_tbl` - write `baz`.`tbl3` - write `db6`.`tbl6` - -If you pipe the output to L, the read and write tables will be grouped -together and sorted alphabetically: - - read `baz`.`new_tbl` - write `baz`.`tbl` - write `baz`.`tbl3` - write `db6`.`tbl6` - =item --tcpdump-errors type: string diff --git a/t/pt-query-digest/samples/slow020_table_access.txt b/t/pt-query-digest/samples/slow020_table_access.txt deleted file mode 100644 index 4b11cac7..00000000 --- a/t/pt-query-digest/samples/slow020_table_access.txt +++ /dev/null @@ -1,3 +0,0 @@ -read `db2`.`foo` -write `db`.`tbl` -read `db1`.`foo` diff --git a/t/pt-query-digest/samples/slow030_table_access.txt b/t/pt-query-digest/samples/slow030_table_access.txt deleted file mode 100644 index 3e8364d9..00000000 --- a/t/pt-query-digest/samples/slow030_table_access.txt +++ /dev/null @@ -1,2 +0,0 @@ -read `foo` -read `bar` diff --git a/t/pt-query-digest/slowlog_analyses.t b/t/pt-query-digest/slowlog_analyses.t index a7c612fa..eb4eef0f 100644 --- a/t/pt-query-digest/slowlog_analyses.t +++ b/t/pt-query-digest/slowlog_analyses.t @@ -276,24 +276,6 @@ ok( 'Distill UNLOCK and LOCK TABLES' ); -# Test --table-access. -ok( - no_diff( - sub { pt_query_digest::main(@args, $sample.'slow020.txt', qw(--no-report --table-access)) }, - "t/pt-query-digest/samples/slow020_table_access.txt", - ), - 'Analysis for slow020 with --table-access' -); - -# This one tests that the list of tables is unique. 
-ok( - no_diff( - sub { pt_query_digest::main(@args, $sample.'slow030.txt', qw(--no-report --table-access)) }, - "t/pt-query-digest/samples/slow030_table_access.txt" - ), - 'Analysis for slow030 with --table-access' -); - ok( no_diff( sub { pt_query_digest::main(@args, $sample.'slow034.txt', qw(--order-by Lock_time:sum), From 8e3605635943a3036d4259e29264a1fecce5a6c0 Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Fri, 11 Jan 2013 15:37:55 -0300 Subject: [PATCH 07/34] pq: Remove --for-explain, make it always show the extra info --- bin/pt-query-digest | 22 +++------------------- lib/QueryReportFormatter.pm | 17 ++++------------- 2 files changed, 7 insertions(+), 32 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index be60329e..5208a27e 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -6555,14 +6555,11 @@ sub query_report { my ($default_db) = $sample->{db} ? $sample->{db} : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} : undef; - my @tables; - if ( $o->get('for-explain') ) { - @tables = $self->{QueryParser}->extract_tables( + my @tables = $self->{QueryParser}->extract_tables( query => $samp_query, default_db => $default_db, Quoter => $self->{Quoter}, ); - } $report .= "\n" if $rank > 1; # space between each event report $report .= $self->event_report( @@ -6599,8 +6596,7 @@ sub query_report { PTDEBUG && _d("Fingerprint\n# $item\n"); - $report .= $self->tables_report(@tables) - if $o->get('for-explain'); + $report .= $self->tables_report(@tables); if ( $samp_query && ($args{variations} && @{$args{variations}}) ) { my $crc = crc32($samp_query); @@ -6624,8 +6620,7 @@ sub query_report { else { $report .= "$samp_query${mark}\n"; my $converted = $qr->convert_to_select($samp_query); - if ( $o->get('for-explain') - && $converted + if ( $converted && $converted =~ m/^[\(\s]*select/i ) { $report .= "# Converted for EXPLAIN\n# EXPLAIN /*!50100 PARTITIONS*/\n$converted${mark}\n"; } @@ -15259,17 +15254,6 @@ check both. Since L<"--filter"> allows you to alter C<$event>, you can use it to do other things, like create new attributes. See L<"ATTRIBUTES"> for an example. -=item --[no]for-explain - -default: yes - -Print extra information to make analysis easy. - -This option adds code snippets to make it easy to run SHOW CREATE TABLE and SHOW -TABLE STATUS for the query's tables. It also rewrites non-SELECT queries into a -SELECT that might be helpful for determining the non-SELECT statement's index -usage. - =item --group-by type: Array; default: fingerprint diff --git a/lib/QueryReportFormatter.pm b/lib/QueryReportFormatter.pm index 33f99410..01f84218 100644 --- a/lib/QueryReportFormatter.pm +++ b/lib/QueryReportFormatter.pm @@ -364,20 +364,14 @@ sub query_report { next ITEM if $review_vals->{reviewed_by} && !$o->get('report-all'); } - # ############################################################### - # Get tables for --for-explain. - # ############################################################### my ($default_db) = $sample->{db} ? $sample->{db} : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} : undef; - my @tables; - if ( $o->get('for-explain') ) { - @tables = $self->{QueryParser}->extract_tables( + my @tables = $self->{QueryParser}->extract_tables( query => $samp_query, default_db => $default_db, Quoter => $self->{Quoter}, ); - } # ############################################################### # Print the standard query analysis report. @@ -419,12 +413,10 @@ sub query_report { if $o->get('shorten'); # Print query fingerprint. 
- $report .= "# Fingerprint\n# $item\n" - if $o->get('fingerprints'); + PTDEBUG && _d("Fingerprint\n# $item\n"); # Print tables used by query. - $report .= $self->tables_report(@tables) - if $o->get('for-explain'); + $report .= $self->tables_report(@tables); # Print sample (worst) query's CRC % 1_000. We mod 1_000 because # that's actually the value stored in the ea, not the full checksum. @@ -453,8 +445,7 @@ sub query_report { else { $report .= "$samp_query${mark}\n"; my $converted = $qr->convert_to_select($samp_query); - if ( $o->get('for-explain') - && $converted + if ( $converted && $converted =~ m/^[\(\s]*select/i ) { # It converted OK to a SELECT $report .= "# Converted for EXPLAIN\n# EXPLAIN /*!50100 PARTITIONS*/\n$converted${mark}\n"; From cd32c04ed124c16b9fc94fedf2c18ce32354582a Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Mon, 14 Jan 2013 12:48:03 -0300 Subject: [PATCH 08/34] Mo: Added an override() export --- lib/Mo.pm | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/Mo.pm b/lib/Mo.pm index ba96a512..65bccd5c 100644 --- a/lib/Mo.pm +++ b/lib/Mo.pm @@ -177,6 +177,7 @@ sub Mo::import { _set_package_isa($caller, @_); _set_inherited_metadata($caller); }, + override => \&override, has => sub { my $names = shift; for my $attribute ( ref $names ? @$names : $names ) { @@ -512,6 +513,16 @@ BEGIN { } } +sub override { + my ($methods, $code) = @_; + my $caller = scalar caller; + + for my $method ( ref($methods) ? @$methods : $methods ) { + my $full_method = "${caller}::${method}"; + *{_glob_for $full_method} = $code; + } +} + } 1; # ########################################################################### From e5d17af0fe1fea73dd4b1fbad936df3ec3decc74 Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Mon, 14 Jan 2013 12:49:55 -0300 Subject: [PATCH 09/34] Mo'ified and minimally simplified ReportFormatter --- lib/ReportFormatter.pm | 211 ++++++++++++++++++++++++++-------------- t/lib/ReportFormatter.t | 18 ++-- 2 files changed, 149 insertions(+), 80 deletions(-) diff --git a/lib/ReportFormatter.pm b/lib/ReportFormatter.pm index 9b85f6b5..06c7ba46 100644 --- a/lib/ReportFormatter.pm +++ b/lib/ReportFormatter.pm @@ -56,8 +56,7 @@ # calculated widths. package ReportFormatter; -use strict; -use warnings FATAL => 'all'; +use Mo; use English qw(-no_match_vars); use constant PTDEBUG => $ENV{PTDEBUG} || 0; @@ -67,7 +66,6 @@ use POSIX qw(ceil); eval { require Term::ReadKey }; my $have_term = $EVAL_ERROR ? 0 : 1; -# Arguments: # * underline_header bool: underline headers with = # * line_prefix scalar: prefix every line with this string # * line_width scalar: line width in characters or 'auto' @@ -77,42 +75,106 @@ my $have_term = $EVAL_ERROR ? 
0 : 1; # * column_errors scalar: die or warn on column errors (default warn) # * truncate_header_side scalar: left or right (default left) # * strip_whitespace bool: strip leading and trailing whitespace -sub new { - my ( $class, %args ) = @_; - my @required_args = qw(); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my $self = { - underline_header => 1, - line_prefix => '# ', - line_width => 78, - column_spacing => ' ', - extend_right => 0, - truncate_line_mark => '...', - column_errors => 'warn', - truncate_header_side => 'left', - strip_whitespace => 1, - %args, # args above can be overriden, args below cannot - n_cols => 0, - }; +# * title scalar: title for the report + +has underline_header => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has line_prefix => ( + is => 'ro', + isa => 'Str', + default => sub { '# ' }, +); +has line_width => ( + is => 'ro', + isa => 'Int', + default => sub { 78 }, +); +has column_spacing => ( + is => 'ro', + isa => 'Str', + default => sub { ' ' }, +); +has extend_right => ( + is => 'ro', + isa => 'Bool', + default => sub { '' }, +); +has truncate_line_mark => ( + is => 'ro', + isa => 'Str', + default => sub { '...' }, +); +has column_errors => ( + is => 'ro', + isa => 'Str', + default => sub { 'warn' }, +); +has truncate_header_side => ( + is => 'ro', + isa => 'Str', + default => sub { 'left' }, +); +has strip_whitespace => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has title => ( + is => 'rw', + isa => 'Str', + predicate => 'has_title', +); + +# Internal + +has n_cols => ( + is => 'rw', + isa => 'Int', + default => sub { 0 }, + init_arg => undef, +); + +has cols => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_cols', +); + +has lines => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_lines', +); + +has truncate_headers => ( + is => 'rw', + isa => 'Bool', + default => sub { undef }, + init_arg => undef, + clearer => 'clear_truncate_headers', +); + +sub BUILDARGS { + my $class = shift; + my $args = $class->SUPER::BUILDARGS(@_); # This is not tested or currently used, but I like the idea and - # think one day it will be very handy in mk-config-diff. - if ( ($self->{line_width} || '') eq 'auto' ) { + # think one day it will be very handy in pt-config-diff. + if ( ($args->{line_width} || '') eq 'auto' ) { die "Cannot auto-detect line width because the Term::ReadKey module " . "is not installed" unless $have_term; - ($self->{line_width}) = GetTerminalSize(); + ($args->{line_width}) = GetTerminalSize(); + PTDEBUG && _d('Line width:', $args->{line_width}); } - PTDEBUG && _d('Line width:', $self->{line_width}); - return bless $self, $class; -} - -sub set_title { - my ( $self, $title ) = @_; - $self->{title} = $title; - return; + return $args; } # @cols is an array of hashrefs. Each hashref describes a column and can @@ -139,7 +201,7 @@ sub set_columns { die "Column does not have a name" unless defined $col_name; if ( $col->{width} ) { - $col->{width_pct} = ceil(($col->{width} * 100) / $self->{line_width}); + $col->{width_pct} = ceil(($col->{width} * 100) / $self->line_width()); PTDEBUG && _d('col:', $col_name, 'width:', $col->{width}, 'chars =', $col->{width_pct}, '%'); } @@ -172,10 +234,10 @@ sub set_columns { # Used with extend_right. 
$col->{right_most} = 1 if $i == $#cols; - push @{$self->{cols}}, $col; + push @{$self->cols}, $col; } - $self->{n_cols} = scalar @cols; + $self->n_cols( scalar @cols ); if ( ($used_width || 0) > 100 ) { die "Total width_pct for all columns is >100%"; @@ -186,16 +248,16 @@ sub set_columns { my $wid_per_col = int((100 - $used_width) / scalar @auto_width_cols); PTDEBUG && _d('Line width left:', (100-$used_width), '%;', 'each auto width col:', $wid_per_col, '%'); - map { $self->{cols}->[$_]->{width_pct} = $wid_per_col } @auto_width_cols; + map { $self->cols->[$_]->{width_pct} = $wid_per_col } @auto_width_cols; } # Add to the minimum possible header width the spacing between columns. - $min_hdr_wid += ($self->{n_cols} - 1) * length $self->{column_spacing}; + $min_hdr_wid += ($self->n_cols() - 1) * length $self->column_spacing(); PTDEBUG && _d('min header width:', $min_hdr_wid); - if ( $min_hdr_wid > $self->{line_width} ) { + if ( $min_hdr_wid > $self->line_width() ) { PTDEBUG && _d('Will truncate headers because min header width', - $min_hdr_wid, '> line width', $self->{line_width}); - $self->{truncate_headers} = 1; + $min_hdr_wid, '> line width', $self->line_width()); + $self->truncate_headers(1); } return; @@ -207,14 +269,14 @@ sub set_columns { sub add_line { my ( $self, @vals ) = @_; my $n_vals = scalar @vals; - if ( $n_vals != $self->{n_cols} ) { + if ( $n_vals != $self->n_cols() ) { $self->_column_error("Number of values $n_vals does not match " - . "number of columns $self->{n_cols}"); + . "number of columns " . $self->n_cols()); } for my $i ( 0..($n_vals-1) ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $val = defined $vals[$i] ? $vals[$i] : $col->{undef_value}; - if ( $self->{strip_whitespace} ) { + if ( $self->strip_whitespace() ) { $val =~ s/^\s+//g; $val =~ s/\s+$//; $vals[$i] = $val; @@ -223,7 +285,7 @@ sub add_line { $col->{min_val} = min($width, ($col->{min_val} || $width)); $col->{max_val} = max($width, ($col->{max_val} || $width)); } - push @{$self->{lines}}, \@vals; + push @{$self->lines}, \@vals; return; } @@ -232,12 +294,14 @@ sub get_report { my ( $self, %args ) = @_; $self->_calculate_column_widths(); - $self->_truncate_headers() if $self->{truncate_headers}; + if ( $self->truncate_headers() ) { + $self->_truncate_headers(); + } $self->_truncate_line_values(%args); my @col_fmts = $self->_make_column_formats(); - my $fmt = ($self->{line_prefix} || '') - . join($self->{column_spacing}, @col_fmts); + my $fmt = $self->line_prefix() + . join($self->column_spacing(), @col_fmts); PTDEBUG && _d('Format:', $fmt); # Make the printf line format for the header and ensure that its labels @@ -246,15 +310,15 @@ sub get_report { # Build the report line by line, starting with the title and header lines. my @lines; - push @lines, sprintf "$self->{line_prefix}$self->{title}" if $self->{title}; + push @lines, $self->line_prefix() . $self->title() if $self->has_title(); push @lines, $self->_truncate_line( - sprintf($hdr_fmt, map { $_->{name} } @{$self->{cols}}), + sprintf($hdr_fmt, map { $_->{name} } @{$self->cols}), strip => 1, mark => '', ); - if ( $self->{underline_header} ) { - my @underlines = map { '=' x $_->{print_width} } @{$self->{cols}}; + if ( $self->underline_header() ) { + my @underlines = map { '=' x $_->{print_width} } @{$self->cols}; push @lines, $self->_truncate_line( sprintf($fmt, map { $_ || '' } @underlines), mark => '', @@ -265,19 +329,24 @@ sub get_report { my $vals = $_; my $i = 0; my @vals = map { - my $val = defined $_ ? 
$_ : $self->{cols}->[$i++]->{undef_value}; + my $val = defined $_ ? $_ : $self->cols->[$i++]->{undef_value}; $val = '' if !defined $val; $val =~ s/\n/ /g; $val; } @$vals; my $line = sprintf($fmt, @vals); - if ( $self->{extend_right} ) { + if ( $self->extend_right() ) { $line; } else { $self->_truncate_line($line); } - } @{$self->{lines}}; + } @{$self->lines}; + + # Clean up any leftover state + $self->clear_cols(); + $self->clear_lines(); + $self->clear_truncate_headers(); return join("\n", @lines) . "\n"; } @@ -285,7 +354,7 @@ sub get_report { sub truncate_value { my ( $self, $col, $val, $width, $side ) = @_; return $val if length $val <= $width; - return $val if $col->{right_most} && $self->{extend_right}; + return $val if $col->{right_most} && $self->extend_right(); $side ||= $col->{truncate_side}; my $mark = $col->{truncate_mark}; if ( $side eq 'right' ) { @@ -305,8 +374,8 @@ sub _calculate_column_widths { my ( $self ) = @_; my $extra_space = 0; - foreach my $col ( @{$self->{cols}} ) { - my $print_width = int($self->{line_width} * ($col->{width_pct} / 100)); + foreach my $col ( @{$self->cols} ) { + my $print_width = int($self->line_width() * ($col->{width_pct} / 100)); PTDEBUG && _d('col:', $col->{name}, 'width pct:', $col->{width_pct}, 'char width:', $print_width, @@ -330,7 +399,7 @@ sub _calculate_column_widths { PTDEBUG && _d('Extra space:', $extra_space); while ( $extra_space-- ) { - foreach my $col ( @{$self->{cols}} ) { + foreach my $col ( @{$self->cols} ) { if ( $col->{auto_width} && ( $col->{print_width} < $col->{max_val} || $col->{print_width} < $col->{header_width}) @@ -346,8 +415,8 @@ sub _calculate_column_widths { sub _truncate_headers { my ( $self, $col ) = @_; - my $side = $self->{truncate_header_side}; - foreach my $col ( @{$self->{cols}} ) { + my $side = $self->truncate_header_side(); + foreach my $col ( @{$self->cols} ) { my $col_name = $col->{name}; my $print_width = $col->{print_width}; next if length $col_name <= $print_width; @@ -360,10 +429,10 @@ sub _truncate_headers { sub _truncate_line_values { my ( $self, %args ) = @_; - my $n_vals = $self->{n_cols} - 1; - foreach my $vals ( @{$self->{lines}} ) { + my $n_vals = $self->n_cols() - 1; + foreach my $vals ( @{$self->lines} ) { for my $i ( 0..$n_vals ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $val = defined $vals->[$i] ? $vals->[$i] : $col->{undef_value}; my $width = length $val; @@ -393,9 +462,9 @@ sub _truncate_line_values { sub _make_column_formats { my ( $self ) = @_; my @col_fmts; - my $n_cols = $self->{n_cols} - 1; + my $n_cols = $self->n_cols() - 1; for my $i ( 0..$n_cols ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; # Normally right-most col has no width so it can potentially # extend_right. But if it's right-justified, it requires a width. @@ -410,12 +479,12 @@ sub _make_column_formats { sub _truncate_line { my ( $self, $line, %args ) = @_; - my $mark = defined $args{mark} ? $args{mark} : $self->{truncate_line_mark}; + my $mark = defined $args{mark} ? $args{mark} : $self->truncate_line_mark(); if ( $line ) { $line =~ s/\s+$// if $args{strip}; my $len = length($line); - if ( $len > $self->{line_width} ) { - $line = substr($line, 0, $self->{line_width} - length $mark); + if ( $len > $self->line_width() ) { + $line = substr($line, 0, $self->line_width() - length $mark); $line .= $mark if $mark; } } @@ -425,7 +494,7 @@ sub _truncate_line { sub _column_error { my ( $self, $err ) = @_; my $msg = "Column error: $err"; - $self->{column_errors} eq 'die' ? 
die $msg : warn $msg; + $self->column_errors() eq 'die' ? die $msg : warn $msg; return; } diff --git a/t/lib/ReportFormatter.t b/t/lib/ReportFormatter.t index 744e85ad..7d69444a 100644 --- a/t/lib/ReportFormatter.t +++ b/t/lib/ReportFormatter.t @@ -88,7 +88,7 @@ is( # Basic report. # ############################################################################# $rf = new ReportFormatter(); -$rf->set_title('Checksum differences'); +$rf->title('Checksum differences'); $rf->set_columns( { name => 'Query ID', @@ -216,7 +216,7 @@ is( # Respect line width. # ############################################################################# $rf = new ReportFormatter(); -$rf->set_title('Respect line width'); +$rf->title('Respect line width'); $rf->set_columns( { name => 'col1' }, { name => 'col2' }, @@ -248,7 +248,7 @@ is( # extend_right # ############################################################################# $rf = new ReportFormatter(extend_right=>1); -$rf->set_title('extend_right'); +$rf->title('extend_right'); $rf->set_columns( { name => 'col1' }, { name => 'col2' }, @@ -280,7 +280,7 @@ is( # Relvative column widths. # ############################################################################# $rf = new ReportFormatter(); -$rf->set_title('Relative col widths'); +$rf->title('Relative col widths'); $rf->set_columns( { name => 'col1', width_pct=>'20', }, { name => 'col2', width_pct=>'40', }, @@ -309,7 +309,7 @@ is( ); $rf = new ReportFormatter(); -$rf->set_title('Relative col widths'); +$rf->title('Relative col widths'); $rf->set_columns( { name => 'col1', width_pct=>'20', }, { name => 'col2', width_pct=>'40', }, @@ -344,7 +344,7 @@ is( ); $rf = new ReportFormatter(); -$rf->set_title('Relative col widths'); +$rf->title('Relative col widths'); $rf->set_columns( { name => 'col1', width =>'25', }, { name => 'col2', width_pct=>'33', }, @@ -380,7 +380,7 @@ is( $rf = new ReportFormatter(); -$rf->set_title('Short cols'); +$rf->title('Short cols'); $rf->set_columns( { name => 'I am column1', }, { name => 'I am column2', }, @@ -403,7 +403,7 @@ is( ); $rf = new ReportFormatter(); -$rf->set_title('Short cols'); +$rf->title('Short cols'); $rf->set_columns( { name => 'I am column1', }, { name => 'I am column2', }, @@ -422,7 +422,7 @@ is( ); $rf = new ReportFormatter(); -$rf->set_title('Short cols'); +$rf->title('Short cols'); $rf->set_columns( { name => 'I am column1', }, { name => 'I am column2', }, From 5e5763f82a2262f268f26e67cdad72638a7a3869 Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Mon, 14 Jan 2013 12:52:23 -0300 Subject: [PATCH 10/34] Partially Moified QueryReportFormatter, split event_report and query_report into two different subs; one that gets the values to print, and one that formats things --- lib/QueryReportFormatter.pm | 478 +++++++++++------- t/lib/QueryReportFormatter.t | 100 +--- .../QueryReportFormatter/report027.txt | 58 --- .../QueryReportFormatter/report029.txt | 56 -- .../QueryReportFormatter/report032.txt | 56 -- 5 files changed, 312 insertions(+), 436 deletions(-) delete mode 100644 t/lib/samples/QueryReportFormatter/report027.txt delete mode 100644 t/lib/samples/QueryReportFormatter/report029.txt delete mode 100644 t/lib/samples/QueryReportFormatter/report032.txt diff --git a/lib/QueryReportFormatter.pm b/lib/QueryReportFormatter.pm index 01f84218..afc6fb82 100644 --- a/lib/QueryReportFormatter.pm +++ b/lib/QueryReportFormatter.pm @@ -29,8 +29,7 @@ # which is also in mk-query-digest. 
package QueryReportFormatter; -use strict; -use warnings FATAL => 'all'; +use Mo; use English qw(-no_match_vars); use POSIX qw(floor); @@ -43,6 +42,9 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0; use constant LINE_LENGTH => 74; use constant MAX_STRING_LENGTH => 10; +{ local $EVAL_ERROR; eval { require Quoter } }; +{ local $EVAL_ERROR; eval { require ReportFormatter } }; + # Sub: new # # Parameters: @@ -56,30 +58,69 @@ use constant MAX_STRING_LENGTH => 10; # Optional arguments: # QueryReview - object used in # dbh - dbh used in -# ExplainAnalyzer - object used in . # # Returns: # QueryReportFormatter object -sub new { - my ( $class, %args ) = @_; - foreach my $arg ( qw(OptionParser QueryRewriter Quoter) ) { - die "I need a $arg argument" unless $args{$arg}; +has Quoter => ( + is => 'ro', + isa => 'Quoter', + default => sub { Quoter->new() }, +); + +has label_width => ( + is => 'ro', + isa => 'Int', +); + +has global_headers => ( + is => 'ro', + isa => 'ArrayRef', + default => sub { [qw( total min max avg 95% stddev median)] }, +); + +has event_headers => ( + is => 'ro', + isa => 'ArrayRef', + default => sub { [qw(pct total min max avg 95% stddev median)] }, +); + +has ReportFormatter => ( + is => 'ro', + isa => 'ReportFormatter', + builder => '_build_report_formatter', +); + +sub _build_report_formatter { + return ReportFormatter->new( + line_width => LINE_LENGTH, + extend_right => 1, + ); +} + +sub BUILDARGS { + my $class = shift; + my $args = $class->SUPER::BUILDARGS(@_); + + foreach my $arg ( qw(OptionParser QueryRewriter) ) { + die "I need a $arg argument" unless $args->{$arg}; } # If ever someone wishes for a wider label width. - my $label_width = $args{label_width} || 12; + my $label_width = $args->{label_width} ||= 12; PTDEBUG && _d('Label width:', $label_width); - my $cheat_width = $label_width + 1; - + my $o = delete $args->{OptionParser}; my $self = { - %args, - label_width => $label_width, + %$args, + options => { + show_all => $o->get('show-all'), + shorten => $o->get('shorten'), + report_all => $o->get('report-all'), + report_histogram => $o->get('report-histogram'), + }, num_format => "# %-${label_width}s %3s %7s %7s %7s %7s %7s %7s %7s", bool_format => "# %-${label_width}s %3d%% yes, %3d%% no", string_format => "# %-${label_width}s %s", - global_headers => [qw( total min max avg 95% stddev median)], - event_headers => [qw(pct total min max avg 95% stddev median)], hidden_attrib => { # Don't sort/print these attribs in the reports. arg => 1, # They're usually handled specially, or not fingerprint => 1, # printed at all. @@ -87,30 +128,7 @@ sub new { ts => 1, }, }; - return bless $self, $class; -} - -# Sub: set_report_formatter -# Set a report formatter object for a report. By default this package will -# instantiate ReportFormatter objects to format columnized reports (e.g. -# for profile and prepared reports). Setting a caller-created formatter -# object (usually a obj) is used for tests. -# -# Parameters: -# %args - Arguments -# -# Required Arguments: -# report - Report name, e.g. profile, prepared, etc. 
-# formatter - Formatter object, usually a obj -sub set_report_formatter { - my ( $self, %args ) = @_; - my @required_args = qw(report formatter); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless exists $args{$arg}; - } - my ($report, $formatter) = @args{@required_args}; - $self->{formatter_for}->{$report} = $formatter; - return; + return $self; } # Arguments: @@ -240,7 +258,7 @@ sub header { shorten(scalar keys %{$results->{classes}}, d=>1_000), shorten($qps || 0, d=>1_000), shorten($conc || 0, d=>1_000)); - $line .= ('_' x (LINE_LENGTH - length($line) + $self->{label_width} - 12)); + $line .= ('_' x (LINE_LENGTH - length($line) + $self->label_width() - 12)); push @result, $line; # Second line: time range @@ -305,6 +323,70 @@ sub header { return join("\n", map { s/\s+$//; $_ } @result) . "\n"; } +sub query_report_values { + my ($self, %args) = @_; + foreach my $arg ( qw(ea worst orderby groupby) ) { + die "I need a $arg argument" unless defined $arg; + } + my $ea = $args{ea}; + my $groupby = $args{groupby}; + my $worst = $args{worst}; + + my $q = $self->Quoter; + my $qv = $self->{QueryReview}; + my $qr = $self->{QueryRewriter}; + + my @values; + # Print each worst item: its stats/metrics (sum/min/max/95%/etc.), + # Query_time distro chart, tables, EXPLAIN, fingerprint, etc. + # Items are usually unique queries/fingerprints--depends on how + # the events were grouped. + ITEM: + foreach my $top_event ( @$worst ) { + my $item = $top_event->[0]; + my $reason = $args{explain_why} ? $top_event->[1] : ''; + my $rank = $top_event->[2]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; + my $samp_query = $sample->{arg} || ''; + + my %item_vals = ( + item => $item, + samp_query => $samp_query, + rank => ($rank || 0), + reason => $reason, + ); + + # ############################################################### + # Possibly skip item for --review. + # ############################################################### + my $review_vals; + if ( $qv ) { + $review_vals = $qv->get_review_info($item); + next ITEM if $review_vals->{reviewed_by} && !$self->{options}->{report_histogram}; + for my $col ( $qv->review_cols() ) { + $item_vals{review_vals}{$col} = $review_vals->{$col}; + } + } + + $item_vals{default_db} = $sample->{db} ? $sample->{db} + : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} + : undef; + $item_vals{tables} = [$self->{QueryParser}->extract_tables( + query => $samp_query, + default_db => $item_vals{default_db}, + Quoter => $self->Quoter, + )]; + + if ( $samp_query && ($args{variations} && @{$args{variations}}) ) { + $item_vals{crc} = crc32($samp_query); + } + + push @values, \%item_vals; + } + return \@values; +} + # Arguments: # * ea obj: EventAggregator # * worst arrayref: worst items @@ -316,16 +398,11 @@ sub header { # * print_header bool: "Report grouped by" header sub query_report { my ( $self, %args ) = @_; - foreach my $arg ( qw(ea worst orderby groupby) ) { - die "I need a $arg argument" unless defined $arg; - } + my $ea = $args{ea}; my $groupby = $args{groupby}; - my $worst = $args{worst}; + my $report_values = $self->query_report_values(%args); - my $o = $self->{OptionParser}; - my $q = $self->{Quoter}; - my $qv = $self->{QueryReview}; my $qr = $self->{QueryRewriter}; my $report = ''; @@ -347,60 +424,36 @@ sub query_report { # Items are usually unique queries/fingerprints--depends on how # the events were grouped. 
ITEM: - foreach my $top_event ( @$worst ) { - my $item = $top_event->[0]; - my $reason = $args{explain_why} ? $top_event->[1] : ''; - my $rank = $top_event->[2]; - my $stats = $ea->results->{classes}->{$item}; - my $sample = $ea->results->{samples}->{$item}; - my $samp_query = $sample->{arg} || ''; - - # ############################################################### - # Possibly skip item for --review. - # ############################################################### - my $review_vals; - if ( $qv ) { - $review_vals = $qv->get_review_info($item); - next ITEM if $review_vals->{reviewed_by} && !$o->get('report-all'); - } - - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} - : undef; - my @tables = $self->{QueryParser}->extract_tables( - query => $samp_query, - default_db => $default_db, - Quoter => $self->{Quoter}, - ); - + foreach my $vals ( @$report_values ) { + my $item = $vals->{item}; # ############################################################### # Print the standard query analysis report. # ############################################################### - $report .= "\n" if $rank > 1; # space between each event report + $report .= "\n" if $vals->{rank} > 1; # space between each event report $report .= $self->event_report( %args, item => $item, - sample => $sample, - rank => $rank, - reason => $reason, + sample => $ea->results->{samples}->{$item}, + rank => $vals->{rank}, + reason => $vals->{reason}, attribs => $attribs, - db => $default_db, + db => $vals->{default_db}, ); - if ( $o->get('report-histogram') ) { + if ( $self->{options}->{report_histogram} ) { $report .= $self->chart_distro( %args, - attrib => $o->get('report-histogram'), - item => $item, + attrib => $self->{options}->{report_histogram}, + item => $vals->{item}, ); } - if ( $qv && $review_vals ) { + if ( $vals->{review_vals} ) { # Print the review information that is already in the table # before putting anything new into the table. $report .= "# Review information\n"; - foreach my $col ( $qv->review_cols() ) { - my $val = $review_vals->{$col}; + foreach my $col ( keys %{$vals->{review_vals}} ) { + my $val = $vals->{review_vals}->{$col}; if ( !$val || $val ne '0000-00-00 00:00:00' ) { # issue 202 $report .= sprintf "# %13s: %-s\n", $col, ($val ? $val : ''); } @@ -409,23 +462,22 @@ sub query_report { if ( $groupby eq 'fingerprint' ) { # Shorten it if necessary (issue 216 and 292). - $samp_query = $qr->shorten($samp_query, $o->get('shorten')) - if $o->get('shorten'); + my $samp_query = $qr->shorten($vals->{samp_query}, $self->{options}->{shorten}) + if $self->{options}->{shorten}; # Print query fingerprint. - PTDEBUG && _d("Fingerprint\n# $item\n"); + PTDEBUG && _d("Fingerprint\n# $vals->{item}\n"); # Print tables used by query. - $report .= $self->tables_report(@tables); + $report .= $self->tables_report(@{$vals->{tables}}); # Print sample (worst) query's CRC % 1_000. We mod 1_000 because # that's actually the value stored in the ea, not the full checksum. # So the report will print something like, # # arg crc 685 (2/66%), 159 (1/33%) # Thus we want our "CRC" line to be 685 and not 18547302820. - if ( $samp_query && ($args{variations} && @{$args{variations}}) ) { - my $crc = crc32($samp_query); - $report.= "# CRC " . ($crc ? $crc % 1_000 : "") . "\n"; + if ( $vals->{crc} ) { + $report.= "# CRC " . ($vals->{crc} % 1_000) . 
"\n"; } my $log_type = $args{log_type} || ''; @@ -439,7 +491,7 @@ sub query_report { } else { $report .= "# EXPLAIN /*!50100 PARTITIONS*/\n$samp_query${mark}\n"; - $report .= $self->explain_report($samp_query, $default_db); + $report .= $self->explain_report($samp_query, $vals->{default_db}); } } else { @@ -454,7 +506,7 @@ sub query_report { } else { if ( $groupby eq 'tables' ) { - my ( $db, $tbl ) = $q->split_unquote($item); + my ( $db, $tbl ) = $self->Quoter->split_unquote($item); $report .= $self->tables_report([$db, $tbl]); } $report .= "$item\n"; @@ -474,21 +526,20 @@ sub query_report { # * rank scalar: item rank among the worst # Print a report about the statistics in the EventAggregator. # Called by query_report(). -sub event_report { - my ( $self, %args ) = @_; - foreach my $arg ( qw(ea item orderby) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $ea = $args{ea}; - my $item = $args{item}; +sub event_report_values { + my ($self, %args) = @_; + + my $ea = $args{ea}; + my $item = $args{item}; my $orderby = $args{orderby}; my $results = $ea->results(); - my $o = $self->{OptionParser}; - my @result; + + my %vals; # Return unless the item exists in the results (it should). my $store = $results->{classes}->{$item}; - return "# No such event $item\n" unless $store; + + return unless $store; # Pick the first attribute to get counts my $global_cnt = $results->{globals}->{$orderby}->{cnt}; @@ -509,51 +560,26 @@ sub event_report { }; } - # First line like: - # Query 1: 9 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 5 ________ - my $line = sprintf( - '# %s %d: %s QPS, %sx concurrency, ID 0x%s at byte %.f ', - ($ea->{groupby} eq 'fingerprint' ? 'Query' : 'Item'), - $args{rank} || 0, - shorten($qps || 0, d=>1_000), - shorten($conc || 0, d=>1_000), - make_checksum($item), - $results->{samples}->{$item}->{pos_in_log} || 0, - ); - $line .= ('_' x (LINE_LENGTH - length($line) + $self->{label_width} - 12)); - push @result, $line; - - # Second line: reason why this class is being reported. - if ( $args{reason} ) { - push @result, - "# This item is included in the report because it matches " - . ($args{reason} eq 'top' ? '--limit.' : '--outliers.'); - } - - # Third line: Variance-to-mean (V/M) ratio, like: - # Scores: V/M = 1.5 - { + $vals{groupby} = $ea->{groupby}; + $vals{qps} = $qps || 0; + $vals{concurrency} = $conc || 0; + $vals{checksum} = make_checksum($item); + $vals{pos_in_log} = $results->{samples}->{$item}->{pos_in_log} || 0; + $vals{reason} = $args{reason}; + $vals{variance_to_mean} = do { my $query_time = $ea->metrics(where => $item, attrib => 'Query_time'); - push @result, - sprintf("# Scores: V/M = %.2f", - ($query_time->{stddev}**2 / ($query_time->{avg} || 1)), - ); + $query_time->{stddev}**2 / ($query_time->{avg} || 1) + }; + + $vals{counts} = { + class_cnt => $class_cnt, + global_cnt => $global_cnt, + }; + + if ( my $ts = $store->{ts}) { + $vals{time_range} = $self->format_time_range($ts) || "unknown"; } - # Last line before column headers: time range - if ( my $ts = $store->{ts} ) { - my $time_range = $self->format_time_range($ts) || "unknown"; - push @result, "# Time range: $time_range"; - } - - # Column header line - push @result, $self->make_event_header(); - - # Count line - push @result, - sprintf $self->{num_format}, 'Count', - percentage_of($class_cnt, $global_cnt), $class_cnt, map { '' } (1..8); - # Sort the attributes, removing any hidden attributes, if they're not # already given to us. 
In mk-query-digest, this sub is called from # query_report(), but in testing it's called directly. query_report() @@ -566,11 +592,10 @@ sub event_report { ); } + $vals{attributes} = { map { $_ => [] } qw(num innodb bool string) }; + foreach my $type ( qw(num innodb) ) { # Add "InnoDB:" sub-header before grouped InnoDB_* attributes. - if ( $type eq 'innodb' && @{$attribs->{$type}} ) { - push @result, "# InnoDB:"; - }; NUM_ATTRIB: foreach my $attrib ( @{$attribs->{$type}} ) { @@ -590,15 +615,12 @@ sub event_report { $pct = percentage_of( $vals->{sum}, $results->{globals}->{$attrib}->{sum}); - push @result, - sprintf $self->{num_format}, - $self->make_label($attrib), $pct, @values; + push @{$vals{attributes}{$type}}, + [ $attrib, $pct, @values ]; } } if ( @{$attribs->{bool}} ) { - push @result, "# Boolean:"; - my $printed_bools = 0; BOOL_ATTRIB: foreach my $attrib ( @{$attribs->{bool}} ) { next BOOL_ATTRIB unless exists $store->{$attrib}; @@ -606,33 +628,125 @@ sub event_report { next unless scalar %$vals; if ( $vals->{sum} > 0 ) { - push @result, - sprintf $self->{bool_format}, - $self->make_label($attrib), $self->bool_percents($vals); - $printed_bools = 1; + push @{$vals{attributes}{bool}}, + [ $attrib, $self->bool_percents($vals) ]; } } - pop @result unless $printed_bools; } if ( @{$attribs->{string}} ) { - push @result, "# String:"; - my $printed_strings = 0; STRING_ATTRIB: foreach my $attrib ( @{$attribs->{string}} ) { next STRING_ATTRIB unless exists $store->{$attrib}; my $vals = $store->{$attrib}; next unless scalar %$vals; + push @{$vals{attributes}{string}}, + [ $attrib, $vals ]; + } + } + + + return \%vals; +} + +# TODO I maybe've broken the groupby report + +sub event_report { + my ( $self, %args ) = @_; + foreach my $arg ( qw(ea item orderby) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + my $item = $args{item}; + my $val = $self->event_report_values(%args); + my @result; + + return "# No such event $item\n" unless $val; + + # First line like: + # Query 1: 9 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 5 ________ + my $line = sprintf( + '# %s %d: %s QPS, %sx concurrency, ID 0x%s at byte %.f ', + ($val->{groupby} eq 'fingerprint' ? 'Query' : 'Item'), + $args{rank} || 0, + shorten($val->{qps}, d=>1_000), + shorten($val->{concurrency}, d=>1_000), + $val->{checksum}, + $val->{pos_in_log}, + ); + $line .= ('_' x (LINE_LENGTH - length($line) + $self->label_width() - 12)); + push @result, $line; + + # Second line: reason why this class is being reported. + if ( $val->{reason} ) { + push @result, + "# This item is included in the report because it matches " + . ($val->{reason} eq 'top' ? '--limit.' : '--outliers.'); + } + + # Third line: Variance-to-mean (V/M) ratio, like: + # Scores: V/M = 1.5 + push @result, + sprintf("# Scores: V/M = %.2f", $val->{variance_to_mean} ); + + # Last line before column headers: time range + if ( $val->{time_range} ) { + push @result, "# Time range: $val->{time_range}"; + } + + # Column header line + push @result, $self->make_event_header(); + + # Count line + push @result, + sprintf $self->{num_format}, 'Count', + percentage_of($val->{counts}{class_cnt}, $val->{counts}{global_cnt}), + $val->{counts}{class_cnt}, + map { '' } (1..8); + + + my $attribs = $val->{attributes}; + + foreach my $type ( qw(num innodb) ) { + # Add "InnoDB:" sub-header before grouped InnoDB_* attributes. 
+ if ( $type eq 'innodb' && @{$attribs->{$type}} ) { + push @result, "# InnoDB:"; + }; + + NUM_ATTRIB: + foreach my $attrib ( @{$attribs->{$type}} ) { + my ($attrib_name, @vals) = @$attrib; + push @result, + sprintf $self->{num_format}, + $self->make_label($attrib_name), @vals; + } + } + + if ( @{$attribs->{bool}} ) { + push @result, "# Boolean:"; + BOOL_ATTRIB: + foreach my $attrib ( @{$attribs->{bool}} ) { + my ($attrib_name, @vals) = @$attrib; + push @result, + sprintf $self->{bool_format}, + $self->make_label($attrib_name), @vals; + } + } + + if ( @{$attribs->{string}} ) { + push @result, "# String:"; + STRING_ATTRIB: + foreach my $attrib ( @{$attribs->{string}} ) { + my ($attrib_name, $vals) = @$attrib; push @result, sprintf $self->{string_format}, - $self->make_label($attrib), - $self->format_string_list($attrib, $vals, $class_cnt); - $printed_strings = 1; + $self->make_label($attrib_name), + $self->format_string_list($attrib_name, $vals, $val->{counts}{class_cnt}); } - pop @result unless $printed_strings; } + return join("\n", map { s/\s+$//; $_ } @result) . "\n"; } @@ -706,7 +820,6 @@ sub chart_distro { # Optional arguments: # * other arrayref: other items (that didn't make it into top worst) # * distill_args hashref: extra args for distill() -# * ReportFormatter obj: passed-in ReportFormatter for testing sub profile { my ( $self, %args ) = @_; foreach my $arg ( qw(ea worst groupby) ) { @@ -718,7 +831,6 @@ sub profile { my $groupby = $args{groupby}; my $qr = $self->{QueryRewriter}; - my $o = $self->{OptionParser}; # Total response time of all events. my $results = $ea->results(); @@ -746,12 +858,8 @@ sub profile { push @profiles, \%profile; } - my $report = $self->{formatter_for}->{profile} || new ReportFormatter( - line_width => LINE_LENGTH, - long_last_column => 1, - extend_right => 1, - ); - $report->set_title('Profile'); + my $report = $self->ReportFormatter(); + $report->title('Profile'); my @cols = ( { name => 'Rank', right_justify => 1, }, { name => 'Query ID', }, @@ -817,7 +925,6 @@ sub profile { # * groupby scalar: attrib worst items grouped by # Optional arguments: # * distill_args hashref: extra args for distill() -# * ReportFormatter obj: passed-in ReportFormatter for testing sub prepared { my ( $self, %args ) = @_; foreach my $arg ( qw(ea worst groupby) ) { @@ -902,12 +1009,8 @@ sub prepared { # Return unless there are prepared statements to report. return unless scalar @prepared; - my $report = $self->{formatter_for}->{prepared} || new ReportFormatter( - line_width => LINE_LENGTH, - long_last_column => 1, - extend_right => 1, - ); - $report->set_title('Prepared statements'); + my $report = $self->ReportFormatter(); + $report->title('Prepared statements'); $report->set_columns( { name => 'Rank', right_justify => 1, }, { name => 'Query ID', }, @@ -943,7 +1046,7 @@ sub make_global_header { # First line: # Attribute total min max avg 95% stddev median push @lines, - sprintf $self->{num_format}, "Attribute", '', @{$self->{global_headers}}; + sprintf $self->{num_format}, "Attribute", '', @{$self->global_headers()}; # Underline first line: # ========= ======= ======= ======= ======= ======= ======= ======= @@ -951,7 +1054,7 @@ sub make_global_header { # Hard-coded values aren't ideal but this code rarely changes. 
push @lines, sprintf $self->{num_format}, - (map { "=" x $_ } $self->{label_width}), + (map { "=" x $_ } $self->label_width()), (map { " " x $_ } qw(3)), # no pct column in global header (map { "=" x $_ } qw(7 7 7 7 7 7 7)); @@ -969,13 +1072,13 @@ sub make_event_header { my @lines; push @lines, - sprintf $self->{num_format}, "Attribute", @{$self->{event_headers}}; + sprintf $self->{num_format}, "Attribute", @{$self->event_headers()}; # The numbers 6, 7, 7, etc. are the field widths from make_header(). # Hard-coded values aren't ideal but this code rarely changes. push @lines, sprintf $self->{num_format}, - map { "=" x $_ } ($self->{label_width}, qw(3 7 7 7 7 7 7 7)); + map { "=" x $_ } ($self->label_width(), qw(3 7 7 7 7 7 7 7)); # End result should be like: # Attribute pct total min max avg 95% stddev median @@ -994,7 +1097,7 @@ sub make_label { if ( $val =~ m/^InnoDB/ ) { $val =~ s/^InnoDB //; $val = $val eq 'trx id' ? "InnoDB trxID" - : substr($val, 0, $self->{label_width}); + : substr($val, 0, $self->label_width()); } $val = $val eq 'user' ? 'Users' @@ -1005,7 +1108,7 @@ sub make_label { : $val eq 'bytes' ? 'Query size' : $val eq 'Tmp disk tables' ? 'Tmp disk tbl' : $val eq 'Tmp table sizes' ? 'Tmp tbl size' - : substr($val, 0, $self->{label_width}); + : substr($val, 0, $self->label_width); return $val; } @@ -1023,8 +1126,7 @@ sub bool_percents { # Does pretty-printing for lists of strings like users, hosts, db. sub format_string_list { my ( $self, $attrib, $vals, $class_cnt ) = @_; - my $o = $self->{OptionParser}; - my $show_all = $o->get('show-all'); + my $show_all = $self->{options}->{show_all}; # Only class result values have unq. So if unq doesn't exist, # then we've been given global values. @@ -1164,7 +1266,7 @@ sub pref_sort { sub tables_report { my ( $self, @tables ) = @_; return '' unless @tables; - my $q = $self->{Quoter}; + my $q = $self->Quoter(); my $tables = ""; foreach my $db_tbl ( @tables ) { my ( $db, $tbl ) = @$db_tbl; @@ -1183,7 +1285,7 @@ sub explain_report { return '' unless $query; my $dbh = $self->{dbh}; - my $q = $self->{Quoter}; + my $q = $self->Quoter(); my $qp = $self->{QueryParser}; return '' unless $dbh && $q && $qp; diff --git a/t/lib/QueryReportFormatter.t b/t/lib/QueryReportFormatter.t index a3834d4c..66442c63 100644 --- a/t/lib/QueryReportFormatter.t +++ b/t/lib/QueryReportFormatter.t @@ -43,7 +43,6 @@ my $o = new OptionParser(description=>'qrf'); my $ex = new ExplainAnalyzer(QueryRewriter => $qr, QueryParser => $qp); $o->get_specs("$trunk/bin/pt-query-digest"); - my $qrf = new QueryReportFormatter( OptionParser => $o, QueryRewriter => $qr, @@ -885,6 +884,13 @@ ok( # Test show_all. @ARGV = qw(--show-all host); $o->get_opts(); +$qrf = new QueryReportFormatter( + OptionParser => $o, + QueryRewriter => $qr, + QueryParser => $qp, + Quoter => $q, + ExplainAnalyzer => $ex, +); $result = $qrf->event_report( ea => $ea, select => [ qw(Query_time host) ], @@ -971,7 +977,13 @@ $ea->calculate_statistical_metrics(apdex_t=>1); # Reset opts in case anything above left something set. @ARGV = qw(); $o->get_opts(); - +$qrf = new QueryReportFormatter( + OptionParser => $o, + QueryRewriter => $qr, + QueryParser => $qp, + Quoter => $q, + ExplainAnalyzer => $ex, +); # Normally, the report subs will make their own ReportFormatter but # that package isn't visible to QueryReportFormatter right now so we # make ReportFormatters and pass them in. Since ReporFormatters can't @@ -980,7 +992,7 @@ $o->get_opts(); # profile subreport. 
And the line width is 82 because that's the new # default to accommodate the EXPLAIN sparkline (issue 1141). my $report = new ReportFormatter(line_width=>82); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); +$qrf->{formatter} = $report; ok( no_diff( sub { $qrf->print_reports( @@ -997,8 +1009,6 @@ ok( "print_reports(header, query_report, profile)" ); -$report = new ReportFormatter(line_width=>82); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); ok( no_diff( sub { $qrf->print_reports( @@ -1051,11 +1061,6 @@ foreach my $event ( @$events ) { $ea->aggregate($event); } $ea->calculate_statistical_metrics(); -$report = new ReportFormatter( - line_width => 82, - extend_right => 1, -); -$qrf->set_report_formatter(report=>'prepared', formatter=>$report); ok( no_diff( sub { @@ -1094,11 +1099,6 @@ foreach my $event ( @$events ) { $ea->aggregate($event); } $ea->calculate_statistical_metrics(); -$report = new ReportFormatter( - line_width => 82, - extend_right => 1, -); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); ok( no_diff( sub { @@ -1130,7 +1130,13 @@ SKIP: { @ARGV = qw(--explain F=/tmp/12345/my.sandbox.cnf); $o->get_opts(); - + $qrf = new QueryReportFormatter( + OptionParser => $o, + QueryRewriter => $qr, + QueryParser => $qp, + Quoter => $q, + ExplainAnalyzer => $ex, + ); my $qrf = new QueryReportFormatter( OptionParser => $o, QueryRewriter => $qr, @@ -1151,56 +1157,6 @@ SKIP: { "explain_report()" ); - my $arg = "select t1.i from t as t1 join t as t2 where t1.i < t2.i and t1.v is not null order by t1.i"; - my $fingerprint = $qr->fingerprint($arg); - - $events = [ - { - Query_time => '0.000286', - arg => $arg, - fingerprint => $fingerprint, - bytes => length $arg, - cmd => 'Query', - db => 'qrf', - pos_in_log => 0, - ts => '091208 09:23:49.637394', - }, - ]; - $ea = new EventAggregator( - groupby => 'fingerprint', - worst => 'Query_time', - ); - foreach my $event ( @$events ) { - $ea->aggregate($event); - } - $ea->calculate_statistical_metrics(); - - $dbh->do("USE mysql"); - $report = new ReportFormatter( - line_width => 82, - extend_right => 1, - ); - $qrf->set_report_formatter(report=>'profile', formatter=>$report); - $dbh->do("USE mysql"); # same reason as above ^; force use db from event - ok( - no_diff( - sub { - $qrf->print_reports( - reports => ['profile', 'query_report'], - ea => $ea, - worst => [ [$fingerprint, 'top', 1], ], - other => [ [$fingerprint, 'misc', 2], ], - orderby => 'Query_time', - groupby => 'fingerprint', - ); - }, - ( $sandbox_version eq '5.6' ? "t/lib/samples/QueryReportFormatter/report032.txt" - : $sandbox_version ge '5.1' ? 
"t/lib/samples/QueryReportFormatter/report027.txt" - : "t/lib/samples/QueryReportFormatter/report029.txt"), - ), - "EXPLAIN sparkline (issue 1141)" - ); - $sb->wipe_clean($dbh); $dbh->disconnect(); } @@ -1251,7 +1207,6 @@ foreach my $event ( @$events ) { $ea->calculate_statistical_metrics(); @ARGV = qw(); $o->get_opts(); -$report = new ReportFormatter(line_width=>82); $qrf = new QueryReportFormatter( OptionParser => $o, QueryRewriter => $qr, @@ -1259,7 +1214,6 @@ $qrf = new QueryReportFormatter( Quoter => $q, ExplainAnalyzer => $ex, ); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); my $output = output( sub { $qrf->print_reports( reports => [qw(rusage date files header query_report profile)], @@ -1323,11 +1277,6 @@ foreach my $event ( @$events ) { $ea->aggregate($event); } $ea->calculate_statistical_metrics(); -$report = new ReportFormatter( - line_width => 82, - extend_right => 1, -); -$qrf->set_report_formatter(report=>'profile', formatter=>$report); ok( no_diff( sub { @@ -1376,11 +1325,6 @@ foreach my $event ( @$events ) { $ea->aggregate($event); } $ea->calculate_statistical_metrics(); -$report = new ReportFormatter( - line_width => 82, - extend_right => 1, -); -$qrf->set_report_formatter(report=>'prepared', formatter=>$report); ok( no_diff( sub { diff --git a/t/lib/samples/QueryReportFormatter/report027.txt b/t/lib/samples/QueryReportFormatter/report027.txt deleted file mode 100644 index fb3bc4ba..00000000 --- a/t/lib/samples/QueryReportFormatter/report027.txt +++ /dev/null @@ -1,58 +0,0 @@ - -# Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ========= -# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 NS 0.00 TF>aI SELECT t -# MISC 0xMISC 0.0003 100.0% 1 0.0003 NS 0.0 MISC <1 ITEMS> - -# Query 1: 0 QPS, 0x concurrency, ID 0x46F81B022F1AD76B at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 -# EXPLAIN sparkline: TF>aI -# Query_time sparkline: | ^ | -# Time range: all events occurred at 2009-12-08 09:23:49.637394 -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 100 1 -# Exec time 100 286us 286us 286us 286us 286us 0 286us -# Query size 100 90 90 90 90 90 0 90 -# String: -# cmd Query -# Databases qrf -# Query_time distribution -# 1us -# 10us -# 100us ################################################################ -# 1ms -# 10ms -# 100ms -# 1s -# 10s+ -# Tables -# SHOW TABLE STATUS FROM `qrf` LIKE 't'\G -# SHOW CREATE TABLE `qrf`.`t`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select t1.i from t as t1 join t as t2 where t1.i < t2.i and t1.v is not null order by t1.i\G -# *************************** 1. row *************************** -# id: 1 -# select_type: SIMPLE -# table: t1 -# partitions: NULL -# type: ALL -# possible_keys: PRIMARY -# key: NULL -# key_len: NULL -# ref: NULL -# rows: 4 -# Extra: Using where; Using temporary; Using filesort -# *************************** 2. 
row *************************** -# id: 1 -# select_type: SIMPLE -# table: t2 -# partitions: NULL -# type: index -# possible_keys: PRIMARY -# key: PRIMARY -# key_len: 4 -# ref: NULL -# rows: 4 -# Extra: Using where; Using index; Using join buffer diff --git a/t/lib/samples/QueryReportFormatter/report029.txt b/t/lib/samples/QueryReportFormatter/report029.txt deleted file mode 100644 index 08605a95..00000000 --- a/t/lib/samples/QueryReportFormatter/report029.txt +++ /dev/null @@ -1,56 +0,0 @@ - -# Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ========= -# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 NS 0.00 TF>aI SELECT t -# MISC 0xMISC 0.0003 100.0% 1 0.0003 NS 0.0 MISC <1 ITEMS> - -# Query 1: 0 QPS, 0x concurrency, ID 0x46F81B022F1AD76B at byte 0 ________ -# Scores: Apdex = NS [0.0]*, V/M = 0.00 -# EXPLAIN sparkline: TF>aI -# Query_time sparkline: | ^ | -# Time range: all events occurred at 2009-12-08 09:23:49.637394 -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 100 1 -# Exec time 100 286us 286us 286us 286us 286us 0 286us -# Query size 100 90 90 90 90 90 0 90 -# String: -# cmd Query -# Databases qrf -# Query_time distribution -# 1us -# 10us -# 100us ################################################################ -# 1ms -# 10ms -# 100ms -# 1s -# 10s+ -# Tables -# SHOW TABLE STATUS FROM `qrf` LIKE 't'\G -# SHOW CREATE TABLE `qrf`.`t`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select t1.i from t as t1 join t as t2 where t1.i < t2.i and t1.v is not null order by t1.i\G -# *************************** 1. row *************************** -# id: 1 -# select_type: SIMPLE -# table: t1 -# type: ALL -# possible_keys: PRIMARY -# key: NULL -# key_len: NULL -# ref: NULL -# rows: 4 -# Extra: Using where; Using temporary; Using filesort -# *************************** 2. row *************************** -# id: 1 -# select_type: SIMPLE -# table: t2 -# type: index -# possible_keys: PRIMARY -# key: PRIMARY -# key_len: 4 -# ref: NULL -# rows: 4 -# Extra: Using where; Using index diff --git a/t/lib/samples/QueryReportFormatter/report032.txt b/t/lib/samples/QueryReportFormatter/report032.txt deleted file mode 100644 index f46eb104..00000000 --- a/t/lib/samples/QueryReportFormatter/report032.txt +++ /dev/null @@ -1,56 +0,0 @@ - -# Profile -# Rank Query ID Response time Calls R/Call V/M Item -# ==== ================== ============= ===== ====== ===== ========= -# 1 0x46F81B022F1AD76B 0.0003 100.0% 1 0.0003 0.00 SELECT t -# MISC 0xMISC 0.0003 100.0% 1 0.0003 0.0 <1 ITEMS> - -# Query 1: 0 QPS, 0x concurrency, ID 0x46F81B022F1AD76B at byte 0 ________ -# Scores: V/M = 0.00 -# Time range: all events occurred at 2009-12-08 09:23:49.637394 -# Attribute pct total min max avg 95% stddev median -# ============ === ======= ======= ======= ======= ======= ======= ======= -# Count 100 1 -# Exec time 100 286us 286us 286us 286us 286us 0 286us -# Query size 100 90 90 90 90 90 0 90 -# String: -# cmd Query -# Databases qrf -# Query_time distribution -# 1us -# 10us -# 100us ################################################################ -# 1ms -# 10ms -# 100ms -# 1s -# 10s+ -# Tables -# SHOW TABLE STATUS FROM `qrf` LIKE 't'\G -# SHOW CREATE TABLE `qrf`.`t`\G -# EXPLAIN /*!50100 PARTITIONS*/ -select t1.i from t as t1 join t as t2 where t1.i < t2.i and t1.v is not null order by t1.i\G -# *************************** 1. 
row *************************** -# id: 1 -# select_type: SIMPLE -# table: t1 -# partitions: NULL -# type: index -# possible_keys: PRIMARY -# key: PRIMARY -# key_len: 4 -# ref: NULL -# rows: 4 -# Extra: Using where -# *************************** 2. row *************************** -# id: 1 -# select_type: SIMPLE -# table: t2 -# partitions: NULL -# type: ALL -# possible_keys: PRIMARY -# key: NULL -# key_len: NULL -# ref: NULL -# rows: 4 -# Extra: Range checked for each record (index map: 0x1) From 3c63750c9b86ec267876f9db23b8210055230b63 Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Mon, 14 Jan 2013 13:20:35 -0300 Subject: [PATCH 11/34] Added JSONReportFormatter, a subclass of QueryReportFormatter that does what it says on the tin. WIP as it does not report profile or prepared statement data yet --- lib/JSONReportFormatter.pm | 84 +++++++++++++++++++ t/pt-query-digest/output.t | 51 +++++++++++ .../samples/output_json_slow002.txt | 2 + .../samples/output_json_tcpdump021.txt | 2 + 4 files changed, 139 insertions(+) create mode 100644 lib/JSONReportFormatter.pm create mode 100644 t/pt-query-digest/output.t create mode 100644 t/pt-query-digest/samples/output_json_slow002.txt create mode 100644 t/pt-query-digest/samples/output_json_tcpdump021.txt diff --git a/lib/JSONReportFormatter.pm b/lib/JSONReportFormatter.pm new file mode 100644 index 00000000..af3e2546 --- /dev/null +++ b/lib/JSONReportFormatter.pm @@ -0,0 +1,84 @@ +{ +package JSONReportFormatter; +use Mo; +use JSON; + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +extends qw(QueryReportFormatter); + +override [qw(rusage date hostname files header profile prepared)] => sub { + return; +}; + +override event_report => sub { + my ($self, %args) = @_; + return $self->event_report_values(%args); +}; + +override query_report => sub { + my ($self, %args) = @_; + foreach my $arg ( qw(ea worst orderby groupby) ) { + die "I need a $arg argument" unless defined $arg; + } + my $ea = $args{ea}; + my $groupby = $args{groupby}; + my $worst = $args{worst}; + + my $q = $self->Quoter; + my $qv = $self->{QueryReview}; + my $qr = $self->{QueryRewriter}; + + my $query_report_vals = $self->query_report_values(%args); + + # Sort the attributes, removing any hidden attributes. + my $attribs = $self->sort_attribs( + ($args{select} ? $args{select} : $ea->get_attributes()), + $ea, + ); + + ITEM: + foreach my $vals ( @$query_report_vals ) { + my $item = $vals->{item}; + my $samp_query = $vals->{samp_query}; + # ############################################################### + # Print the standard query analysis report. 
+ # ############################################################### + $vals->{event_report} = $self->event_report( + %args, + item => $item, + sample => $ea->results->{samples}->{$item}, + rank => $vals->{rank}, + reason => $vals->{reason}, + attribs => $attribs, + db => $vals->{default_db}, + ); + + if ( $groupby eq 'fingerprint' ) { + if ( $item =~ m/^(?:[\(\s]*select|insert|replace)/ ) { + if ( $item !~ m/^(?:insert|replace)/ ) { # No EXPLAIN + $vals->{for_explain} = "EXPLAIN /*!50100 PARTITIONS*/\n$samp_query\\G\n"; + $vals->{explain_report} = $self->explain_report($samp_query, $vals->{default_db}); + } + } + else { + my $converted = $qr->convert_to_select($samp_query); + if ( $converted + && $converted =~ m/^[\(\s]*select/i ) { + $vals->{for_explain} = "EXPLAIN /*!50100 PARTITIONS*/\n$converted\\G\n"; + } + } + } + else { + if ( $groupby eq 'tables' ) { + my ( $db, $tbl ) = $q->split_unquote($item); + $vals->{tables_report} = $self->tables_report([$db, $tbl]); + } + } + } + + return encode_json($query_report_vals) . "\n"; +}; + +1; +} diff --git a/t/pt-query-digest/output.t b/t/pt-query-digest/output.t new file mode 100644 index 00000000..9af5b9a8 --- /dev/null +++ b/t/pt-query-digest/output.t @@ -0,0 +1,51 @@ +#!/usr/bin/env perl + +BEGIN { + die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n" + unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH}; + unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib"; +}; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Test::More; + +use PerconaTest; +require "$trunk/bin/pt-query-digest"; + +my @args = qw(--output json); +my $sample = "$trunk/t/lib/samples"; +my $results = "t/pt-query-digest/samples"; + +ok( + no_diff( + sub { pt_query_digest::main(@args, "$sample/slowlogs/empty") }, + "$results/empty_report.txt", + ), + 'json output for empty log' +); + +ok( + no_diff( + sub { pt_query_digest::main(@args, "$sample/slowlogs/slow002.txt") }, + "$results/output_json_slow002.txt" + ), + 'json output for slow002' +); + +# --type tcpdump + +ok( + no_diff( + sub { pt_query_digest::main(qw(--type tcpdump --limit 10 --watch-server 127.0.0.1:12345), + @args, "$sample/tcpdump/tcpdump021.txt") }, + "$results/output_json_tcpdump021.txt", + ), + 'json output for for tcpdump021', +); + +# ############################################################################# +# Done. 
+# ############################################################################# +done_testing; diff --git a/t/pt-query-digest/samples/output_json_slow002.txt b/t/pt-query-digest/samples/output_json_slow002.txt new file mode 100644 index 00000000..adeae63b --- /dev/null +++ b/t/pt-query-digest/samples/output_json_slow002.txt @@ -0,0 +1,2 @@ + +[{"event_report":{"time_range":"all events occurred at 2007-12-18 11:48:27","variance_to_mean":0,"reason":"top","qps":0,"counts":{"class_cnt":1,"global_cnt":8},"concurrency":0,"checksum":"66825DDC008FFA89","pos_in_log":338,"attributes":{"bool":[["Full_scan","100","0"]],"innodb":[],"num":[["Query_time","95","726ms","726ms","726ms","726ms","726ms",0,"726ms"],["Lock_time","29","91us","91us","91us","91us","91us",0,"91us"],["Rows_sent","0","0","0","0","0","0","0","0"],["Rows_examined","100","61.48k","61.48k","61.48k","61.48k","61.48k","0","61.48k"],["Merge_passes","0","0","0","0","0","0","0","0"],["bytes","25","129","129","129","129","129","0","129"]],"string":[["db",{"min":"db1","max":"db1","unq":{"db1":1},"cnt":1}],["host",{"min":"","max":"","unq":{"":1},"cnt":1}],["user",{"min":"[SQL_SLAVE]","max":"[SQL_SLAVE]","unq":{"[SQL_SLAVE]":1},"cnt":1}]]},"groupby":"fingerprint"},"reason":"top","item":"update d?tuningdetail_?_? n inner join d?gonzo a using(gonzo) set n.column? = a.column?, n.word? = a.word?","samp_query":"update db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) \n set n.column1 = a.column1, n.word3 = a.word3","for_explain":"EXPLAIN /*!50100 PARTITIONS*/\nselect n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) \\G\n","tables":[["db2","tuningdetail_21_265507"],["db1","gonzo"]],"rank":1,"default_db":"db1"}] diff --git a/t/pt-query-digest/samples/output_json_tcpdump021.txt b/t/pt-query-digest/samples/output_json_tcpdump021.txt new file mode 100644 index 00000000..dad915c0 --- /dev/null +++ b/t/pt-query-digest/samples/output_json_tcpdump021.txt @@ -0,0 +1,2 @@ + +[{"event_report":{"time_range":"all events occurred at 2009-12-08 09:23:49.637394","variance_to_mean":0,"reason":"top","qps":0,"counts":{"class_cnt":1,"global_cnt":3},"concurrency":0,"checksum":"AA8E9FA785927259","pos_in_log":0,"attributes":{"bool":[],"innodb":[],"num":[["Query_time","50","286us","286us","286us","286us","286us",0,"286us"],["Rows_affected","0","0","0","0","0","0","0","0"],["bytes","35","35","35","35","35","35","0","35"],["Warning_count","0","0","0","0","0","0","0","0"]],"string":[["Error_no",{"min":"none","max":"none","unq":{"none":1},"cnt":1}],["host",{"min":"127.0.0.1","max":"127.0.0.1","unq":{"127.0.0.1":1},"cnt":1}],["Statement_id",{"min":2,"max":2,"unq":{"2":1},"cnt":1}]]},"groupby":"fingerprint"},"reason":"top","item":"prepare select i from d.t where i=?","samp_query":"PREPARE SELECT i FROM d.t WHERE i=?","for_explain":"EXPLAIN /*!50100 PARTITIONS*/\nSELECT i FROM d.t WHERE i=?\\G\n","tables":[["d","t"]],"rank":1,"default_db":null},{"event_report":{"time_range":"all events occurred at 2009-12-08 
09:23:49.637892","variance_to_mean":0,"reason":"top","qps":0,"counts":{"class_cnt":1,"global_cnt":3},"concurrency":0,"checksum":"3F79759E7FA2F117","pos_in_log":1106,"attributes":{"bool":[["No_index_used","100","0"]],"innodb":[],"num":[["Query_time","49","281us","281us","281us","281us","281us",0,"281us"],["Rows_affected","0","0","0","0","0","0","0","0"],["bytes","37","37","37","37","37","37","0","37"],["Warning_count","0","0","0","0","0","0","0","0"]],"string":[["Error_no",{"min":"none","max":"none","unq":{"none":1},"cnt":1}],["host",{"min":"127.0.0.1","max":"127.0.0.1","unq":{"127.0.0.1":1},"cnt":1}],["Statement_id",{"min":"2","max":"2","unq":{"2":1},"cnt":1}]]},"groupby":"fingerprint"},"reason":"top","item":"execute select i from d.t where i=?","samp_query":"EXECUTE SELECT i FROM d.t WHERE i=\"3\"","for_explain":"EXPLAIN /*!50100 PARTITIONS*/\nSELECT i FROM d.t WHERE i=\"3\"\\G\n","tables":[["d","t"]],"rank":2,"default_db":null},{"samp_query":"administrator command: Quit","tables":[],"event_report":{"time_range":"all events occurred at 2009-12-08 09:23:49.638381","variance_to_mean":0,"reason":"top","qps":0,"counts":{"class_cnt":1,"global_cnt":3},"concurrency":0,"checksum":"AA353644DE4C4CB4","pos_in_log":1850,"attributes":{"bool":[],"innodb":[],"num":[["Query_time","0",0,0,0,0,0,0,0],["Rows_affected","0","0","0","0","0","0","0","0"],["bytes","27","27","27","27","27","27","0","27"],["Warning_count","0","0","0","0","0","0","0","0"]],"string":[["Error_no",{"min":"none","max":"none","unq":{"none":1},"cnt":1}],["host",{"min":"127.0.0.1","max":"127.0.0.1","unq":{"127.0.0.1":1},"cnt":1}]]},"groupby":"fingerprint"},"reason":"top","item":"administrator command: Quit","rank":3,"default_db":null}] From 7cae1753375e53c0b368d2633f142b53af8d044b Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Mon, 14 Jan 2013 13:22:03 -0300 Subject: [PATCH 12/34] pqd: --output json --- bin/pt-query-digest | 1396 ++++++++++++++++++++++++++++++------------- 1 file changed, 982 insertions(+), 414 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 5208a27e..318217c5 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -14,6 +14,7 @@ use warnings FATAL => 'all'; BEGIN { $INC{$_} = __FILE__ for map { (my $pkg = "$_.pm") =~ s!::!/!g; $pkg } (qw( Percona::Toolkit + Mo DSNParser Quoter OptionParser @@ -29,6 +30,7 @@ BEGIN { EventAggregator ReportFormatter QueryReportFormatter + JSONReportFormatter EventTimeline QueryParser TableParser @@ -44,7 +46,6 @@ BEGIN { MasterSlave Progress FileIterator - ExplainAnalyzer Runtime Pipeline VersionCheck @@ -71,6 +72,468 @@ our $VERSION = '2.1.7'; # End Percona::Toolkit package # ########################################################################### +# ########################################################################### +# Mo package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Mo.pm +# t/lib/Mo.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +BEGIN { +$INC{"Mo.pm"} = __FILE__; +package Mo; +our $VERSION = '0.30_Percona'; # Forked from 0.30 of Mo. + +{ + no strict 'refs'; + sub _glob_for { + return \*{shift()} + } + + sub _stash_for { + return \%{ shift() . 
"::" }; + } +} + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(looks_like_number blessed); + + +our %TYPES = ( + Bool => sub { !$_[0] || (defined $_[0] && looks_like_number($_[0]) && $_[0] == 1) }, + Num => sub { defined $_[0] && looks_like_number($_[0]) }, + Int => sub { defined $_[0] && looks_like_number($_[0]) && $_[0] == int($_[0]) }, + Str => sub { defined $_[0] }, + Object => sub { defined $_[0] && blessed($_[0]) }, + FileHandle => sub { local $@; require IO::Handle; fileno($_[0]) && $_[0]->opened }, + + map { + my $type = /R/ ? $_ : uc $_; + $_ . "Ref" => sub { ref $_[0] eq $type } + } qw(Array Code Hash Regexp Glob Scalar) +); + +our %metadata_for; +{ + package Mo::Object; + + sub new { + my $class = shift; + my $args = $class->BUILDARGS(@_); + + my @args_to_delete; + while ( my ($attr, $meta) = each %{$metadata_for{$class}} ) { + next unless exists $meta->{init_arg}; + my $init_arg = $meta->{init_arg}; + + if ( defined $init_arg ) { + $args->{$attr} = delete $args->{$init_arg}; + } + else { + push @args_to_delete, $attr; + } + } + + delete $args->{$_} for @args_to_delete; + + for my $attribute ( keys %$args ) { + if ( my $coerce = $metadata_for{$class}{$attribute}{coerce} ) { + $args->{$attribute} = $coerce->($args->{$attribute}); + } + if ( my $I = $metadata_for{$class}{$attribute}{isa} ) { + ( (my $I_name), $I ) = @{$I}; + Mo::_check_type_constaints($attribute, $I, $I_name, $args->{$attribute}); + } + } + + while ( my ($attribute, $meta) = each %{$metadata_for{$class}} ) { + next unless $meta->{required}; + Carp::confess("Attribute ($attribute) is required for $class") + if ! exists $args->{$attribute} + } + + @_ = %$args; + my $self = bless $args, $class; + + my @build_subs; + my $linearized_isa = mro::get_linear_isa($class); + + for my $isa_class ( @$linearized_isa ) { + unshift @build_subs, *{ Mo::_glob_for "${isa_class}::BUILD" }{CODE}; + } + exists &$_ && $_->( $self, @_ ) for grep { defined } @build_subs; + return $self; + } + + sub BUILDARGS { + shift; + my $ref; + if ( @_ == 1 && ref($_[0]) ) { + Carp::confess("Single parameters to new() must be a HASH ref") + unless ref($_[0]) eq ref({}); + $ref = {%{$_[0]}} # We want a new reference, always + } + else { + $ref = { @_ }; + } + return $ref; + } +} + +my %export_for; +sub Mo::import { + warnings->import(qw(FATAL all)); + strict->import(); + + my $caller = scalar caller(); # Caller's package + my $caller_pkg = $caller . "::"; # Caller's package with :: at the end + my (%exports, %options); + + my (undef, @features) = @_; + my %ignore = ( map { $_ => 1 } qw( is isa init_arg builder buildargs clearer predicate build handles default required ) ); + for my $feature (grep { !$ignore{$_} } @features) { + { local $@; require "Mo/$feature.pm"; } + { + no strict 'refs'; + &{"Mo::${feature}::e"}( + $caller_pkg, + \%exports, + \%options, + \@_ + ); + } + } + + return if $exports{M}; + + %exports = ( + extends => sub { + for my $class ( map { "$_" } @_ ) { + $class =~ s{::|'}{/}g; + { local $@; eval { require "$class.pm" } } # or warn $@; + } + _set_package_isa($caller, @_); + _set_inherited_metadata($caller); + }, + override => \&override, + has => sub { + my $names = shift; + for my $attribute ( ref $names ? @$names : $names ) { + my %args = @_; + my $method = ($args{is} || '') eq 'ro' + ? sub { + Carp::confess("Cannot assign a value to a read-only accessor at reader ${caller_pkg}${attribute}") + if $#_; + return $_[0]{$attribute}; + } + : sub { + return $#_ + ? 
$_[0]{$attribute} = $_[1] + : $_[0]{$attribute}; + }; + + $metadata_for{$caller}{$attribute} = (); + + if ( my $I = $args{isa} ) { + my $orig_I = $I; + my $type; + if ( $I =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $I = _nested_constraints($attribute, $1, $2); + } + $metadata_for{$caller}{$attribute}{isa} = [$orig_I, $I]; + my $orig_method = $method; + $method = sub { + if ( $#_ ) { + Mo::_check_type_constaints($attribute, $I, $orig_I, $_[1]); + } + goto &$orig_method; + }; + } + + if ( my $builder = $args{builder} ) { + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? $_[0]{$attribute} = $_[0]->$builder + : goto &$original_method + }; + } + + if ( my $code = $args{default} ) { + Carp::confess("${caller}::${attribute}'s default is $code, but should be a coderef") + unless ref($code) eq 'CODE'; + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? $_[0]{$attribute} = $_[0]->$code + : goto &$original_method + }; + } + + if ( my $role = $args{does} ) { + my $original_method = $method; + $method = sub { + if ( $#_ ) { + Carp::confess(qq) + unless Scalar::Util::blessed($_[1]) && eval { $_[1]->does($role) } + } + goto &$original_method + }; + } + + if ( my $coercion = $args{coerce} ) { + $metadata_for{$caller}{$attribute}{coerce} = $coercion; + my $original_method = $method; + $method = sub { + if ( $#_ ) { + return $original_method->($_[0], $coercion->($_[1])) + } + goto &$original_method; + } + } + + $method = $options{$_}->($method, $attribute, @_) + for sort keys %options; + + *{ _glob_for "${caller}::$attribute" } = $method; + + if ( $args{required} ) { + $metadata_for{$caller}{$attribute}{required} = 1; + } + + if ($args{clearer}) { + *{ _glob_for "${caller}::$args{clearer}" } + = sub { delete shift->{$attribute} } + } + + if ($args{predicate}) { + *{ _glob_for "${caller}::$args{predicate}" } + = sub { exists shift->{$attribute} } + } + + if ($args{handles}) { + _has_handles($caller, $attribute, \%args); + } + + if (exists $args{init_arg}) { + $metadata_for{$caller}{$attribute}{init_arg} = $args{init_arg}; + } + } + }, + %exports, + ); + + $export_for{$caller} = [ keys %exports ]; + + for my $keyword ( keys %exports ) { + *{ _glob_for "${caller}::$keyword" } = $exports{$keyword} + } + *{ _glob_for "${caller}::extends" }{CODE}->( "Mo::Object" ) + unless @{ *{ _glob_for "${caller}::ISA" }{ARRAY} || [] }; +}; + +sub _check_type_constaints { + my ($attribute, $I, $I_name, $val) = @_; + ( ref($I) eq 'CODE' + ? $I->($val) + : (ref $val eq $I + || ($val && $val eq $I) + || (exists $TYPES{$I} && $TYPES{$I}->($val))) + ) + || Carp::confess( + qq + . qq + . (defined $val ? 
Mo::Dumper($val) : 'undef') ) +} + +sub _has_handles { + my ($caller, $attribute, $args) = @_; + my $handles = $args->{handles}; + + my $ref = ref $handles; + my $kv; + if ( $ref eq ref [] ) { + $kv = { map { $_,$_ } @{$handles} }; + } + elsif ( $ref eq ref {} ) { + $kv = $handles; + } + elsif ( $ref eq ref qr// ) { + Carp::confess("Cannot delegate methods based on a Regexp without a type constraint (isa)") + unless $args->{isa}; + my $target_class = $args->{isa}; + $kv = { + map { $_, $_ } + grep { $_ =~ $handles } + grep { !exists $Mo::Object::{$_} && $target_class->can($_) } + grep { $_ ne 'has' && $_ ne 'extends' } + keys %{ _stash_for $target_class } + }; + } + else { + Carp::confess("handles for $ref not yet implemented"); + } + + while ( my ($method, $target) = each %{$kv} ) { + my $name = _glob_for "${caller}::$method"; + Carp::confess("You cannot overwrite a locally defined method ($method) with a delegation") + if defined &$name; + + my ($target, @curried_args) = ref($target) ? @$target : $target; + *$name = sub { + my $self = shift; + my $delegate_to = $self->$attribute(); + my $error = "Cannot delegate $method to $target because the value of $attribute"; + Carp::confess("$error is not defined") unless $delegate_to; + Carp::confess("$error is not an object (got '$delegate_to')") + unless Scalar::Util::blessed($delegate_to) || (!ref($delegate_to) && $delegate_to->can($target)); + return $delegate_to->$target(@curried_args, @_); + } + } +} + +sub _nested_constraints { + my ($attribute, $aggregate_type, $type) = @_; + + my $inner_types; + if ( $type =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $inner_types = _nested_constraints($1, $2); + } + else { + $inner_types = $TYPES{$type}; + } + + if ( $aggregate_type eq 'ArrayRef' ) { + return sub { + my ($val) = @_; + return unless ref($val) eq ref([]); + + if ($inner_types) { + for my $value ( @{$val} ) { + return unless $inner_types->($value) + } + } + else { + for my $value ( @{$val} ) { + return unless $value && ($value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type))); + } + } + return 1; + }; + } + elsif ( $aggregate_type eq 'Maybe' ) { + return sub { + my ($value) = @_; + return 1 if ! 
defined($value); + if ($inner_types) { + return unless $inner_types->($value) + } + else { + return unless $value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type)); + } + return 1; + } + } + else { + Carp::confess("Nested aggregate types are only implemented for ArrayRefs and Maybe"); + } +} + +sub _set_package_isa { + my ($package, @new_isa) = @_; + + *{ _glob_for "${package}::ISA" } = [@new_isa]; +} + +sub _set_inherited_metadata { + my $class = shift; + my $linearized_isa = mro::get_linear_isa($class); + my %new_metadata; + + for my $isa_class (reverse @$linearized_isa) { + %new_metadata = ( + %new_metadata, + %{ $metadata_for{$isa_class} || {} }, + ); + } + $metadata_for{$class} = \%new_metadata; +} + +sub unimport { + my $caller = scalar caller(); + my $stash = _stash_for( $caller ); + + delete $stash->{$_} for @{$export_for{$caller}}; +} + +sub Dumper { + require Data::Dumper; + local $Data::Dumper::Indent = 0; + local $Data::Dumper::Sortkeys = 0; + local $Data::Dumper::Quotekeys = 0; + local $Data::Dumper::Terse = 1; + + Data::Dumper::Dumper(@_) +} + +BEGIN { + if ($] >= 5.010) { + { local $@; require mro; } + } + else { + local $@; + eval { + require MRO::Compat; + } or do { + *mro::get_linear_isa = *mro::get_linear_isa_dfs = sub { + no strict 'refs'; + + my $classname = shift; + + my @lin = ($classname); + my %stored; + foreach my $parent (@{"$classname\::ISA"}) { + my $plin = mro::get_linear_isa_dfs($parent); + foreach (@$plin) { + next if exists $stored{$_}; + push(@lin, $_); + $stored{$_} = 1; + } + } + return \@lin; + }; + } + } +} + +sub override { + my ($methods, $code) = @_; + my $caller = scalar caller; + + for my $method ( ref($methods) ? @$methods : $methods ) { + my $full_method = "${caller}::${method}"; + *{_glob_for $full_method} = $code; + } +} + +} +1; +} +# ########################################################################### +# End Mo package +# ########################################################################### + # ########################################################################### # DSNParser package # This package is a copy without comments from the original. The original @@ -5939,8 +6402,7 @@ sub _d { { package ReportFormatter; -use strict; -use warnings FATAL => 'all'; +use Mo; use English qw(-no_match_vars); use constant PTDEBUG => $ENV{PTDEBUG} || 0; @@ -5950,40 +6412,102 @@ use POSIX qw(ceil); eval { require Term::ReadKey }; my $have_term = $EVAL_ERROR ? 0 : 1; -sub new { - my ( $class, %args ) = @_; - my @required_args = qw(); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless $args{$arg}; - } - my $self = { - underline_header => 1, - line_prefix => '# ', - line_width => 78, - column_spacing => ' ', - extend_right => 0, - truncate_line_mark => '...', - column_errors => 'warn', - truncate_header_side => 'left', - strip_whitespace => 1, - %args, # args above can be overriden, args below cannot - n_cols => 0, - }; - if ( ($self->{line_width} || '') eq 'auto' ) { +has underline_header => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has line_prefix => ( + is => 'ro', + isa => 'Str', + default => sub { '# ' }, +); +has line_width => ( + is => 'ro', + isa => 'Int', + default => sub { 78 }, +); +has column_spacing => ( + is => 'ro', + isa => 'Str', + default => sub { ' ' }, +); +has extend_right => ( + is => 'ro', + isa => 'Bool', + default => sub { '' }, +); +has truncate_line_mark => ( + is => 'ro', + isa => 'Str', + default => sub { '...' 
}, +); +has column_errors => ( + is => 'ro', + isa => 'Str', + default => sub { 'warn' }, +); +has truncate_header_side => ( + is => 'ro', + isa => 'Str', + default => sub { 'left' }, +); +has strip_whitespace => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has title => ( + is => 'rw', + isa => 'Str', + predicate => 'has_title', +); + + +has n_cols => ( + is => 'rw', + isa => 'Int', + default => sub { 0 }, + init_arg => undef, +); + +has cols => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_cols', +); + +has lines => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_lines', +); + +has truncate_headers => ( + is => 'rw', + isa => 'Bool', + default => sub { undef }, + init_arg => undef, + clearer => 'clear_truncate_headers', +); + +sub BUILDARGS { + my $class = shift; + my $args = $class->SUPER::BUILDARGS(@_); + + if ( ($args->{line_width} || '') eq 'auto' ) { die "Cannot auto-detect line width because the Term::ReadKey module " . "is not installed" unless $have_term; - ($self->{line_width}) = GetTerminalSize(); + ($args->{line_width}) = GetTerminalSize(); + PTDEBUG && _d('Line width:', $args->{line_width}); } - PTDEBUG && _d('Line width:', $self->{line_width}); - return bless $self, $class; -} - -sub set_title { - my ( $self, $title ) = @_; - $self->{title} = $title; - return; + return $args; } sub set_columns { @@ -5999,7 +6523,7 @@ sub set_columns { die "Column does not have a name" unless defined $col_name; if ( $col->{width} ) { - $col->{width_pct} = ceil(($col->{width} * 100) / $self->{line_width}); + $col->{width_pct} = ceil(($col->{width} * 100) / $self->line_width()); PTDEBUG && _d('col:', $col_name, 'width:', $col->{width}, 'chars =', $col->{width_pct}, '%'); } @@ -6026,10 +6550,10 @@ sub set_columns { $col->{right_most} = 1 if $i == $#cols; - push @{$self->{cols}}, $col; + push @{$self->cols}, $col; } - $self->{n_cols} = scalar @cols; + $self->n_cols( scalar @cols ); if ( ($used_width || 0) > 100 ) { die "Total width_pct for all columns is >100%"; @@ -6039,15 +6563,15 @@ sub set_columns { my $wid_per_col = int((100 - $used_width) / scalar @auto_width_cols); PTDEBUG && _d('Line width left:', (100-$used_width), '%;', 'each auto width col:', $wid_per_col, '%'); - map { $self->{cols}->[$_]->{width_pct} = $wid_per_col } @auto_width_cols; + map { $self->cols->[$_]->{width_pct} = $wid_per_col } @auto_width_cols; } - $min_hdr_wid += ($self->{n_cols} - 1) * length $self->{column_spacing}; + $min_hdr_wid += ($self->n_cols() - 1) * length $self->column_spacing(); PTDEBUG && _d('min header width:', $min_hdr_wid); - if ( $min_hdr_wid > $self->{line_width} ) { + if ( $min_hdr_wid > $self->line_width() ) { PTDEBUG && _d('Will truncate headers because min header width', - $min_hdr_wid, '> line width', $self->{line_width}); - $self->{truncate_headers} = 1; + $min_hdr_wid, '> line width', $self->line_width()); + $self->truncate_headers(1); } return; @@ -6056,14 +6580,14 @@ sub set_columns { sub add_line { my ( $self, @vals ) = @_; my $n_vals = scalar @vals; - if ( $n_vals != $self->{n_cols} ) { + if ( $n_vals != $self->n_cols() ) { $self->_column_error("Number of values $n_vals does not match " - . "number of columns $self->{n_cols}"); + . "number of columns " . $self->n_cols()); } for my $i ( 0..($n_vals-1) ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $val = defined $vals[$i] ? 
$vals[$i] : $col->{undef_value}; - if ( $self->{strip_whitespace} ) { + if ( $self->strip_whitespace() ) { $val =~ s/^\s+//g; $val =~ s/\s+$//; $vals[$i] = $val; @@ -6072,7 +6596,7 @@ sub add_line { $col->{min_val} = min($width, ($col->{min_val} || $width)); $col->{max_val} = max($width, ($col->{max_val} || $width)); } - push @{$self->{lines}}, \@vals; + push @{$self->lines}, \@vals; return; } @@ -6080,26 +6604,28 @@ sub get_report { my ( $self, %args ) = @_; $self->_calculate_column_widths(); - $self->_truncate_headers() if $self->{truncate_headers}; + if ( $self->truncate_headers() ) { + $self->_truncate_headers(); + } $self->_truncate_line_values(%args); my @col_fmts = $self->_make_column_formats(); - my $fmt = ($self->{line_prefix} || '') - . join($self->{column_spacing}, @col_fmts); + my $fmt = $self->line_prefix() + . join($self->column_spacing(), @col_fmts); PTDEBUG && _d('Format:', $fmt); (my $hdr_fmt = $fmt) =~ s/%([^-])/%-$1/g; my @lines; - push @lines, sprintf "$self->{line_prefix}$self->{title}" if $self->{title}; + push @lines, $self->line_prefix() . $self->title() if $self->has_title(); push @lines, $self->_truncate_line( - sprintf($hdr_fmt, map { $_->{name} } @{$self->{cols}}), + sprintf($hdr_fmt, map { $_->{name} } @{$self->cols}), strip => 1, mark => '', ); - if ( $self->{underline_header} ) { - my @underlines = map { '=' x $_->{print_width} } @{$self->{cols}}; + if ( $self->underline_header() ) { + my @underlines = map { '=' x $_->{print_width} } @{$self->cols}; push @lines, $self->_truncate_line( sprintf($fmt, map { $_ || '' } @underlines), mark => '', @@ -6110,19 +6636,23 @@ sub get_report { my $vals = $_; my $i = 0; my @vals = map { - my $val = defined $_ ? $_ : $self->{cols}->[$i++]->{undef_value}; + my $val = defined $_ ? $_ : $self->cols->[$i++]->{undef_value}; $val = '' if !defined $val; $val =~ s/\n/ /g; $val; } @$vals; my $line = sprintf($fmt, @vals); - if ( $self->{extend_right} ) { + if ( $self->extend_right() ) { $line; } else { $self->_truncate_line($line); } - } @{$self->{lines}}; + } @{$self->lines}; + + $self->clear_cols(); + $self->clear_lines(); + $self->clear_truncate_headers(); return join("\n", @lines) . 
"\n"; } @@ -6130,7 +6660,7 @@ sub get_report { sub truncate_value { my ( $self, $col, $val, $width, $side ) = @_; return $val if length $val <= $width; - return $val if $col->{right_most} && $self->{extend_right}; + return $val if $col->{right_most} && $self->extend_right(); $side ||= $col->{truncate_side}; my $mark = $col->{truncate_mark}; if ( $side eq 'right' ) { @@ -6150,8 +6680,8 @@ sub _calculate_column_widths { my ( $self ) = @_; my $extra_space = 0; - foreach my $col ( @{$self->{cols}} ) { - my $print_width = int($self->{line_width} * ($col->{width_pct} / 100)); + foreach my $col ( @{$self->cols} ) { + my $print_width = int($self->line_width() * ($col->{width_pct} / 100)); PTDEBUG && _d('col:', $col->{name}, 'width pct:', $col->{width_pct}, 'char width:', $print_width, @@ -6175,7 +6705,7 @@ sub _calculate_column_widths { PTDEBUG && _d('Extra space:', $extra_space); while ( $extra_space-- ) { - foreach my $col ( @{$self->{cols}} ) { + foreach my $col ( @{$self->cols} ) { if ( $col->{auto_width} && ( $col->{print_width} < $col->{max_val} || $col->{print_width} < $col->{header_width}) @@ -6190,8 +6720,8 @@ sub _calculate_column_widths { sub _truncate_headers { my ( $self, $col ) = @_; - my $side = $self->{truncate_header_side}; - foreach my $col ( @{$self->{cols}} ) { + my $side = $self->truncate_header_side(); + foreach my $col ( @{$self->cols} ) { my $col_name = $col->{name}; my $print_width = $col->{print_width}; next if length $col_name <= $print_width; @@ -6204,10 +6734,10 @@ sub _truncate_headers { sub _truncate_line_values { my ( $self, %args ) = @_; - my $n_vals = $self->{n_cols} - 1; - foreach my $vals ( @{$self->{lines}} ) { + my $n_vals = $self->n_cols() - 1; + foreach my $vals ( @{$self->lines} ) { for my $i ( 0..$n_vals ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $val = defined $vals->[$i] ? $vals->[$i] : $col->{undef_value}; my $width = length $val; @@ -6233,9 +6763,9 @@ sub _truncate_line_values { sub _make_column_formats { my ( $self ) = @_; my @col_fmts; - my $n_cols = $self->{n_cols} - 1; + my $n_cols = $self->n_cols() - 1; for my $i ( 0..$n_cols ) { - my $col = $self->{cols}->[$i]; + my $col = $self->cols->[$i]; my $width = $col->{right_most} && !$col->{right_justify} ? '' : $col->{print_width}; @@ -6248,12 +6778,12 @@ sub _make_column_formats { sub _truncate_line { my ( $self, $line, %args ) = @_; - my $mark = defined $args{mark} ? $args{mark} : $self->{truncate_line_mark}; + my $mark = defined $args{mark} ? $args{mark} : $self->truncate_line_mark(); if ( $line ) { $line =~ s/\s+$// if $args{strip}; my $len = length($line); - if ( $len > $self->{line_width} ) { - $line = substr($line, 0, $self->{line_width} - length $mark); + if ( $len > $self->line_width() ) { + $line = substr($line, 0, $self->line_width() - length $mark); $line .= $mark if $mark; } } @@ -6263,7 +6793,7 @@ sub _truncate_line { sub _column_error { my ( $self, $err ) = @_; my $msg = "Column error: $err"; - $self->{column_errors} eq 'die' ? die $msg : warn $msg; + $self->column_errors() eq 'die' ? 
die $msg : warn $msg; return; } @@ -6292,8 +6822,7 @@ sub _d { { package QueryReportFormatter; -use strict; -use warnings FATAL => 'all'; +use Mo; use English qw(-no_match_vars); use POSIX qw(floor); @@ -6306,25 +6835,68 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0; use constant LINE_LENGTH => 74; use constant MAX_STRING_LENGTH => 10; -sub new { - my ( $class, %args ) = @_; - foreach my $arg ( qw(OptionParser QueryRewriter Quoter) ) { - die "I need a $arg argument" unless $args{$arg}; +{ local $EVAL_ERROR; eval { require Quoter } }; +{ local $EVAL_ERROR; eval { require ReportFormatter } }; + +has Quoter => ( + is => 'ro', + isa => 'Quoter', + default => sub { Quoter->new() }, +); + +has label_width => ( + is => 'ro', + isa => 'Int', +); + +has global_headers => ( + is => 'ro', + isa => 'ArrayRef', + default => sub { [qw( total min max avg 95% stddev median)] }, +); + +has event_headers => ( + is => 'ro', + isa => 'ArrayRef', + default => sub { [qw(pct total min max avg 95% stddev median)] }, +); + +has ReportFormatter => ( + is => 'ro', + isa => 'ReportFormatter', + builder => '_build_report_formatter', +); + +sub _build_report_formatter { + return ReportFormatter->new( + line_width => LINE_LENGTH, + extend_right => 1, + ); +} + +sub BUILDARGS { + my $class = shift; + my $args = $class->SUPER::BUILDARGS(@_); + + foreach my $arg ( qw(OptionParser QueryRewriter) ) { + die "I need a $arg argument" unless $args->{$arg}; } - my $label_width = $args{label_width} || 12; + my $label_width = $args->{label_width} ||= 12; PTDEBUG && _d('Label width:', $label_width); - my $cheat_width = $label_width + 1; - + my $o = delete $args->{OptionParser}; my $self = { - %args, - label_width => $label_width, + %$args, + options => { + show_all => $o->get('show-all'), + shorten => $o->get('shorten'), + report_all => $o->get('report-all'), + report_histogram => $o->get('report-histogram'), + }, num_format => "# %-${label_width}s %3s %7s %7s %7s %7s %7s %7s %7s", bool_format => "# %-${label_width}s %3d%% yes, %3d%% no", string_format => "# %-${label_width}s %s", - global_headers => [qw( total min max avg 95% stddev median)], - event_headers => [qw(pct total min max avg 95% stddev median)], hidden_attrib => { # Don't sort/print these attribs in the reports. arg => 1, # They're usually handled specially, or not fingerprint => 1, # printed at all. @@ -6332,18 +6904,7 @@ sub new { ts => 1, }, }; - return bless $self, $class; -} - -sub set_report_formatter { - my ( $self, %args ) = @_; - my @required_args = qw(report formatter); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless exists $args{$arg}; - } - my ($report, $formatter) = @args{@required_args}; - $self->{formatter_for}->{$report} = $formatter; - return; + return $self; } sub print_reports { @@ -6449,7 +7010,7 @@ sub header { shorten(scalar keys %{$results->{classes}}, d=>1_000), shorten($qps || 0, d=>1_000), shorten($conc || 0, d=>1_000)); - $line .= ('_' x (LINE_LENGTH - length($line) + $self->{label_width} - 12)); + $line .= ('_' x (LINE_LENGTH - length($line) + $self->label_width() - 12)); push @result, $line; if ( my $ts = $results->{globals}->{ts} ) { @@ -6510,8 +7071,8 @@ sub header { return join("\n", map { s/\s+$//; $_ } @result) . 
"\n"; } -sub query_report { - my ( $self, %args ) = @_; +sub query_report_values { + my ($self, %args) = @_; foreach my $arg ( qw(ea worst orderby groupby) ) { die "I need a $arg argument" unless defined $arg; } @@ -6519,11 +7080,63 @@ sub query_report { my $groupby = $args{groupby}; my $worst = $args{worst}; - my $o = $self->{OptionParser}; - my $q = $self->{Quoter}; + my $q = $self->Quoter; my $qv = $self->{QueryReview}; my $qr = $self->{QueryRewriter}; + my @values; + ITEM: + foreach my $top_event ( @$worst ) { + my $item = $top_event->[0]; + my $reason = $args{explain_why} ? $top_event->[1] : ''; + my $rank = $top_event->[2]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; + my $samp_query = $sample->{arg} || ''; + + my %item_vals = ( + item => $item, + samp_query => $samp_query, + rank => ($rank || 0), + reason => $reason, + ); + + my $review_vals; + if ( $qv ) { + $review_vals = $qv->get_review_info($item); + next ITEM if $review_vals->{reviewed_by} && !$self->{options}->{report_histogram}; + for my $col ( $qv->review_cols() ) { + $item_vals{review_vals}{$col} = $review_vals->{$col}; + } + } + + $item_vals{default_db} = $sample->{db} ? $sample->{db} + : $stats->{db}->{unq} ? keys %{$stats->{db}->{unq}} + : undef; + $item_vals{tables} = [$self->{QueryParser}->extract_tables( + query => $samp_query, + default_db => $item_vals{default_db}, + Quoter => $self->Quoter, + )]; + + if ( $samp_query && ($args{variations} && @{$args{variations}}) ) { + $item_vals{crc} = crc32($samp_query); + } + + push @values, \%item_vals; + } + return \@values; +} + +sub query_report { + my ( $self, %args ) = @_; + + my $ea = $args{ea}; + my $groupby = $args{groupby}; + my $report_values = $self->query_report_values(%args); + + my $qr = $self->{QueryRewriter}; + my $report = ''; if ( $args{print_header} ) { @@ -6538,52 +7151,31 @@ sub query_report { ); ITEM: - foreach my $top_event ( @$worst ) { - my $item = $top_event->[0]; - my $reason = $args{explain_why} ? $top_event->[1] : ''; - my $rank = $top_event->[2]; - my $stats = $ea->results->{classes}->{$item}; - my $sample = $ea->results->{samples}->{$item}; - my $samp_query = $sample->{arg} || ''; - - my $review_vals; - if ( $qv ) { - $review_vals = $qv->get_review_info($item); - next ITEM if $review_vals->{reviewed_by} && !$o->get('report-all'); - } - - my ($default_db) = $sample->{db} ? $sample->{db} - : $stats->{db}->{unq} ? 
keys %{$stats->{db}->{unq}} - : undef; - my @tables = $self->{QueryParser}->extract_tables( - query => $samp_query, - default_db => $default_db, - Quoter => $self->{Quoter}, - ); - - $report .= "\n" if $rank > 1; # space between each event report + foreach my $vals ( @$report_values ) { + my $item = $vals->{item}; + $report .= "\n" if $vals->{rank} > 1; # space between each event report $report .= $self->event_report( %args, item => $item, - sample => $sample, - rank => $rank, - reason => $reason, + sample => $ea->results->{samples}->{$item}, + rank => $vals->{rank}, + reason => $vals->{reason}, attribs => $attribs, - db => $default_db, + db => $vals->{default_db}, ); - if ( $o->get('report-histogram') ) { + if ( $self->{options}->{report_histogram} ) { $report .= $self->chart_distro( %args, - attrib => $o->get('report-histogram'), - item => $item, + attrib => $self->{options}->{report_histogram}, + item => $vals->{item}, ); } - if ( $qv && $review_vals ) { + if ( $vals->{review_vals} ) { $report .= "# Review information\n"; - foreach my $col ( $qv->review_cols() ) { - my $val = $review_vals->{$col}; + foreach my $col ( keys %{$vals->{review_vals}} ) { + my $val = $vals->{review_vals}->{$col}; if ( !$val || $val ne '0000-00-00 00:00:00' ) { # issue 202 $report .= sprintf "# %13s: %-s\n", $col, ($val ? $val : ''); } @@ -6591,16 +7183,15 @@ sub query_report { } if ( $groupby eq 'fingerprint' ) { - $samp_query = $qr->shorten($samp_query, $o->get('shorten')) - if $o->get('shorten'); + my $samp_query = $qr->shorten($vals->{samp_query}, $self->{options}->{shorten}) + if $self->{options}->{shorten}; - PTDEBUG && _d("Fingerprint\n# $item\n"); + PTDEBUG && _d("Fingerprint\n# $vals->{item}\n"); - $report .= $self->tables_report(@tables); + $report .= $self->tables_report(@{$vals->{tables}}); - if ( $samp_query && ($args{variations} && @{$args{variations}}) ) { - my $crc = crc32($samp_query); - $report.= "# CRC " . ($crc ? $crc % 1_000 : "") . "\n"; + if ( $vals->{crc} ) { + $report.= "# CRC " . ($vals->{crc} % 1_000) . "\n"; } my $log_type = $args{log_type} || ''; @@ -6614,7 +7205,7 @@ sub query_report { } else { $report .= "# EXPLAIN /*!50100 PARTITIONS*/\n$samp_query${mark}\n"; - $report .= $self->explain_report($samp_query, $default_db); + $report .= $self->explain_report($samp_query, $vals->{default_db}); } } else { @@ -6628,7 +7219,7 @@ sub query_report { } else { if ( $groupby eq 'tables' ) { - my ( $db, $tbl ) = $q->split_unquote($item); + my ( $db, $tbl ) = $self->Quoter->split_unquote($item); $report .= $self->tables_report([$db, $tbl]); } $report .= "$item\n"; @@ -6638,20 +7229,19 @@ sub query_report { return $report; } -sub event_report { - my ( $self, %args ) = @_; - foreach my $arg ( qw(ea item orderby) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $ea = $args{ea}; - my $item = $args{item}; +sub event_report_values { + my ($self, %args) = @_; + + my $ea = $args{ea}; + my $item = $args{item}; my $orderby = $args{orderby}; my $results = $ea->results(); - my $o = $self->{OptionParser}; - my @result; + + my %vals; my $store = $results->{classes}->{$item}; - return "# No such event $item\n" unless $store; + + return unless $store; my $global_cnt = $results->{globals}->{$orderby}->{cnt}; my $class_cnt = $store->{$orderby}->{cnt}; @@ -6670,43 +7260,26 @@ sub event_report { }; } - my $line = sprintf( - '# %s %d: %s QPS, %sx concurrency, ID 0x%s at byte %.f ', - ($ea->{groupby} eq 'fingerprint' ? 
'Query' : 'Item'), - $args{rank} || 0, - shorten($qps || 0, d=>1_000), - shorten($conc || 0, d=>1_000), - make_checksum($item), - $results->{samples}->{$item}->{pos_in_log} || 0, - ); - $line .= ('_' x (LINE_LENGTH - length($line) + $self->{label_width} - 12)); - push @result, $line; - - if ( $args{reason} ) { - push @result, - "# This item is included in the report because it matches " - . ($args{reason} eq 'top' ? '--limit.' : '--outliers.'); - } - - { + $vals{groupby} = $ea->{groupby}; + $vals{qps} = $qps || 0; + $vals{concurrency} = $conc || 0; + $vals{checksum} = make_checksum($item); + $vals{pos_in_log} = $results->{samples}->{$item}->{pos_in_log} || 0; + $vals{reason} = $args{reason}; + $vals{variance_to_mean} = do { my $query_time = $ea->metrics(where => $item, attrib => 'Query_time'); - push @result, - sprintf("# Scores: V/M = %.2f", - ($query_time->{stddev}**2 / ($query_time->{avg} || 1)), - ); + $query_time->{stddev}**2 / ($query_time->{avg} || 1) + }; + + $vals{counts} = { + class_cnt => $class_cnt, + global_cnt => $global_cnt, + }; + + if ( my $ts = $store->{ts}) { + $vals{time_range} = $self->format_time_range($ts) || "unknown"; } - if ( my $ts = $store->{ts} ) { - my $time_range = $self->format_time_range($ts) || "unknown"; - push @result, "# Time range: $time_range"; - } - - push @result, $self->make_event_header(); - - push @result, - sprintf $self->{num_format}, 'Count', - percentage_of($class_cnt, $global_cnt), $class_cnt, map { '' } (1..8); - my $attribs = $args{attribs}; if ( !$attribs ) { $attribs = $self->sort_attribs( @@ -6715,10 +7288,9 @@ sub event_report { ); } + $vals{attributes} = { map { $_ => [] } qw(num innodb bool string) }; + foreach my $type ( qw(num innodb) ) { - if ( $type eq 'innodb' && @{$attribs->{$type}} ) { - push @result, "# InnoDB:"; - }; NUM_ATTRIB: foreach my $attrib ( @{$attribs->{$type}} ) { @@ -6738,15 +7310,12 @@ sub event_report { $pct = percentage_of( $vals->{sum}, $results->{globals}->{$attrib}->{sum}); - push @result, - sprintf $self->{num_format}, - $self->make_label($attrib), $pct, @values; + push @{$vals{attributes}{$type}}, + [ $attrib, $pct, @values ]; } } if ( @{$attribs->{bool}} ) { - push @result, "# Boolean:"; - my $printed_bools = 0; BOOL_ATTRIB: foreach my $attrib ( @{$attribs->{bool}} ) { next BOOL_ATTRIB unless exists $store->{$attrib}; @@ -6754,33 +7323,115 @@ sub event_report { next unless scalar %$vals; if ( $vals->{sum} > 0 ) { - push @result, - sprintf $self->{bool_format}, - $self->make_label($attrib), $self->bool_percents($vals); - $printed_bools = 1; + push @{$vals{attributes}{bool}}, + [ $attrib, $self->bool_percents($vals) ]; } } - pop @result unless $printed_bools; } if ( @{$attribs->{string}} ) { - push @result, "# String:"; - my $printed_strings = 0; STRING_ATTRIB: foreach my $attrib ( @{$attribs->{string}} ) { next STRING_ATTRIB unless exists $store->{$attrib}; my $vals = $store->{$attrib}; next unless scalar %$vals; + push @{$vals{attributes}{string}}, + [ $attrib, $vals ]; + } + } + + + return \%vals; +} + + +sub event_report { + my ( $self, %args ) = @_; + foreach my $arg ( qw(ea item orderby) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + my $item = $args{item}; + my $val = $self->event_report_values(%args); + my @result; + + return "# No such event $item\n" unless $val; + + my $line = sprintf( + '# %s %d: %s QPS, %sx concurrency, ID 0x%s at byte %.f ', + ($val->{groupby} eq 'fingerprint' ? 
'Query' : 'Item'), + $args{rank} || 0, + shorten($val->{qps}, d=>1_000), + shorten($val->{concurrency}, d=>1_000), + $val->{checksum}, + $val->{pos_in_log}, + ); + $line .= ('_' x (LINE_LENGTH - length($line) + $self->label_width() - 12)); + push @result, $line; + + if ( $val->{reason} ) { + push @result, + "# This item is included in the report because it matches " + . ($val->{reason} eq 'top' ? '--limit.' : '--outliers.'); + } + + push @result, + sprintf("# Scores: V/M = %.2f", $val->{variance_to_mean} ); + + if ( $val->{time_range} ) { + push @result, "# Time range: $val->{time_range}"; + } + + push @result, $self->make_event_header(); + + push @result, + sprintf $self->{num_format}, 'Count', + percentage_of($val->{counts}{class_cnt}, $val->{counts}{global_cnt}), + $val->{counts}{class_cnt}, + map { '' } (1..8); + + + my $attribs = $val->{attributes}; + + foreach my $type ( qw(num innodb) ) { + if ( $type eq 'innodb' && @{$attribs->{$type}} ) { + push @result, "# InnoDB:"; + }; + + NUM_ATTRIB: + foreach my $attrib ( @{$attribs->{$type}} ) { + my ($attrib_name, @vals) = @$attrib; + push @result, + sprintf $self->{num_format}, + $self->make_label($attrib_name), @vals; + } + } + + if ( @{$attribs->{bool}} ) { + push @result, "# Boolean:"; + BOOL_ATTRIB: + foreach my $attrib ( @{$attribs->{bool}} ) { + my ($attrib_name, @vals) = @$attrib; + push @result, + sprintf $self->{bool_format}, + $self->make_label($attrib_name), @vals; + } + } + + if ( @{$attribs->{string}} ) { + push @result, "# String:"; + STRING_ATTRIB: + foreach my $attrib ( @{$attribs->{string}} ) { + my ($attrib_name, $vals) = @$attrib; push @result, sprintf $self->{string_format}, - $self->make_label($attrib), - $self->format_string_list($attrib, $vals, $class_cnt); - $printed_strings = 1; + $self->make_label($attrib_name), + $self->format_string_list($attrib_name, $vals, $val->{counts}{class_cnt}); } - pop @result unless $printed_strings; } + return join("\n", map { s/\s+$//; $_ } @result) . 
"\n"; } @@ -6843,7 +7494,6 @@ sub profile { my $groupby = $args{groupby}; my $qr = $self->{QueryRewriter}; - my $o = $self->{OptionParser}; my $results = $ea->results(); my $total_r = $results->{globals}->{Query_time}->{sum} || 0; @@ -6870,12 +7520,8 @@ sub profile { push @profiles, \%profile; } - my $report = $self->{formatter_for}->{profile} || new ReportFormatter( - line_width => LINE_LENGTH, - long_last_column => 1, - extend_right => 1, - ); - $report->set_title('Profile'); + my $report = $self->ReportFormatter(); + $report->title('Profile'); my @cols = ( { name => 'Rank', right_justify => 1, }, { name => 'Query ID', }, @@ -7009,12 +7655,8 @@ sub prepared { return unless scalar @prepared; - my $report = $self->{formatter_for}->{prepared} || new ReportFormatter( - line_width => LINE_LENGTH, - long_last_column => 1, - extend_right => 1, - ); - $report->set_title('Prepared statements'); + my $report = $self->ReportFormatter(); + $report->title('Prepared statements'); $report->set_columns( { name => 'Rank', right_justify => 1, }, { name => 'Query ID', }, @@ -7048,11 +7690,11 @@ sub make_global_header { my @lines; push @lines, - sprintf $self->{num_format}, "Attribute", '', @{$self->{global_headers}}; + sprintf $self->{num_format}, "Attribute", '', @{$self->global_headers()}; push @lines, sprintf $self->{num_format}, - (map { "=" x $_ } $self->{label_width}), + (map { "=" x $_ } $self->label_width()), (map { " " x $_ } qw(3)), # no pct column in global header (map { "=" x $_ } qw(7 7 7 7 7 7 7)); @@ -7066,11 +7708,11 @@ sub make_event_header { my @lines; push @lines, - sprintf $self->{num_format}, "Attribute", @{$self->{event_headers}}; + sprintf $self->{num_format}, "Attribute", @{$self->event_headers()}; push @lines, sprintf $self->{num_format}, - map { "=" x $_ } ($self->{label_width}, qw(3 7 7 7 7 7 7 7)); + map { "=" x $_ } ($self->label_width(), qw(3 7 7 7 7 7 7 7)); $self->{event_header_lines} = \@lines; return @lines; @@ -7085,7 +7727,7 @@ sub make_label { if ( $val =~ m/^InnoDB/ ) { $val =~ s/^InnoDB //; $val = $val eq 'trx id' ? "InnoDB trxID" - : substr($val, 0, $self->{label_width}); + : substr($val, 0, $self->label_width()); } $val = $val eq 'user' ? 'Users' @@ -7096,7 +7738,7 @@ sub make_label { : $val eq 'bytes' ? 'Query size' : $val eq 'Tmp disk tables' ? 'Tmp disk tbl' : $val eq 'Tmp table sizes' ? 'Tmp tbl size' - : substr($val, 0, $self->{label_width}); + : substr($val, 0, $self->label_width); return $val; } @@ -7110,8 +7752,7 @@ sub bool_percents { sub format_string_list { my ( $self, $attrib, $vals, $class_cnt ) = @_; - my $o = $self->{OptionParser}; - my $show_all = $o->get('show-all'); + my $show_all = $self->{options}->{show_all}; if ( !exists $vals->{unq} ) { return ($vals->{cnt}); @@ -7241,7 +7882,7 @@ sub pref_sort { sub tables_report { my ( $self, @tables ) = @_; return '' unless @tables; - my $q = $self->{Quoter}; + my $q = $self->Quoter(); my $tables = ""; foreach my $db_tbl ( @tables ) { my ( $db, $tbl ) = @$db_tbl; @@ -7260,7 +7901,7 @@ sub explain_report { return '' unless $query; my $dbh = $self->{dbh}; - my $q = $self->{Quoter}; + my $q = $self->Quoter(); my $qp = $self->{QueryParser}; return '' unless $dbh && $q && $qp; @@ -7323,6 +7964,98 @@ sub _d { # End QueryReportFormatter package # ########################################################################### +# ########################################################################### +# JSONReportFormatter package +# This package is a copy without comments from the original. 
The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/JSONReportFormatter.pm +# t/lib/JSONReportFormatter.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package JSONReportFormatter; +use Mo; +use JSON; + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +extends qw(QueryReportFormatter); + +override [qw(rusage date hostname files header profile prepared)] => sub { + return; +}; + +override event_report => sub { + my ($self, %args) = @_; + return $self->event_report_values(%args); +}; + +override query_report => sub { + my ($self, %args) = @_; + foreach my $arg ( qw(ea worst orderby groupby) ) { + die "I need a $arg argument" unless defined $arg; + } + my $ea = $args{ea}; + my $groupby = $args{groupby}; + my $worst = $args{worst}; + + my $q = $self->Quoter; + my $qv = $self->{QueryReview}; + my $qr = $self->{QueryRewriter}; + + my $query_report_vals = $self->query_report_values(%args); + + my $attribs = $self->sort_attribs( + ($args{select} ? $args{select} : $ea->get_attributes()), + $ea, + ); + + ITEM: + foreach my $vals ( @$query_report_vals ) { + my $item = $vals->{item}; + my $samp_query = $vals->{samp_query}; + $vals->{event_report} = $self->event_report( + %args, + item => $item, + sample => $ea->results->{samples}->{$item}, + rank => $vals->{rank}, + reason => $vals->{reason}, + attribs => $attribs, + db => $vals->{default_db}, + ); + + if ( $groupby eq 'fingerprint' ) { + if ( $item =~ m/^(?:[\(\s]*select|insert|replace)/ ) { + if ( $item !~ m/^(?:insert|replace)/ ) { # No EXPLAIN + $vals->{for_explain} = "EXPLAIN /*!50100 PARTITIONS*/\n$samp_query\\G\n"; + $vals->{explain_report} = $self->explain_report($samp_query, $vals->{default_db}); + } + } + else { + my $converted = $qr->convert_to_select($samp_query); + if ( $converted + && $converted =~ m/^[\(\s]*select/i ) { + $vals->{for_explain} = "EXPLAIN /*!50100 PARTITIONS*/\n$converted\\G\n"; + } + } + } + else { + if ( $groupby eq 'tables' ) { + my ( $db, $tbl ) = $q->split_unquote($item); + $vals->{tables_report} = $self->tables_report([$db, $tbl]); + } + } + } + + return encode_json($query_report_vals) . "\n"; +}; + +1; +} +# ########################################################################### +# End JSONReportFormatter package +# ########################################################################### + # ########################################################################### # EventTimeline package # This package is a copy without comments from the original. The original @@ -11106,171 +11839,6 @@ sub _d { # End FileIterator package # ########################################################################### -# ########################################################################### -# ExplainAnalyzer package -# This package is a copy without comments from the original. The original -# with comments and its test file can be found in the Bazaar repository at, -# lib/ExplainAnalyzer.pm -# t/lib/ExplainAnalyzer.t -# See https://launchpad.net/percona-toolkit for more information. 
-# ########################################################################### -{ -package ExplainAnalyzer; - -use strict; -use warnings FATAL => 'all'; -use English qw(-no_match_vars); -use constant PTDEBUG => $ENV{PTDEBUG} || 0; - -use Data::Dumper; -$Data::Dumper::Indent = 1; -$Data::Dumper::Sortkeys = 1; -$Data::Dumper::Quotekeys = 0; - -sub new { - my ( $class, %args ) = @_; - foreach my $arg ( qw(QueryRewriter QueryParser) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my $self = { - %args, - }; - return bless $self, $class; -} - -sub explain_query { - my ( $self, %args ) = @_; - foreach my $arg ( qw(dbh query) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($query, $dbh) = @args{qw(query dbh)}; - $query = $self->{QueryRewriter}->convert_to_select($query); - if ( $query !~ m/^\s*select/i ) { - PTDEBUG && _d("Cannot EXPLAIN non-SELECT query:", - (length $query <= 100 ? $query : substr($query, 0, 100) . "...")); - return; - } - my $sql = "EXPLAIN $query"; - PTDEBUG && _d($dbh, $sql); - my $explain = $dbh->selectall_arrayref($sql, { Slice => {} }); - PTDEBUG && _d("Result of EXPLAIN:", Dumper($explain)); - return $explain; -} - -sub normalize { - my ( $self, $explain ) = @_; - my @result; # Don't modify the input. - - foreach my $row ( @$explain ) { - $row = { %$row }; # Make a copy -- don't modify the input. - - foreach my $col ( qw(key possible_keys key_len ref) ) { - $row->{$col} = [ split(/,/, $row->{$col} || '') ]; - } - - $row->{Extra} = { - map { - my $var = $_; - - if ( my ($key, $vals) = $var =~ m/(Using union)\(([^)]+)\)/ ) { - $key => [ split(/,/, $vals) ]; - } - - else { - $var => 1; - } - } - split(/; /, $row->{Extra} || '') # Split on semicolons. - }; - - push @result, $row; - } - - return \@result; -} - -sub get_alternate_indexes { - my ( $self, $keys, $possible_keys ) = @_; - my %used = map { $_ => 1 } @$keys; - return [ grep { !$used{$_} } @$possible_keys ]; -} - -sub get_index_usage { - my ( $self, %args ) = @_; - foreach my $arg ( qw(query explain) ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($query, $explain) = @args{qw(query explain)}; - my @result; - - my $lookup = $self->{QueryParser}->get_aliases($query); - - foreach my $row ( @$explain ) { - - next if !defined $row->{table} - || $row->{table} =~ m/^<(derived|union)\d/; - - my $table = $lookup->{TABLE}->{$row->{table}} || $row->{table}; - my $db = $lookup->{DATABASE}->{$table} || $args{db}; - push @result, { - db => $db, - tbl => $table, - idx => $row->{key}, - alt => $self->get_alternate_indexes( - $row->{key}, $row->{possible_keys}), - }; - } - - PTDEBUG && _d("Index usage for", - (length $query <= 100 ? $query : substr($query, 0, 100) . "..."), - ":", Dumper(\@result)); - return \@result; -} - -sub get_usage_for { - my ( $self, $checksum, $db ) = @_; - die "I need a checksum and db" unless defined $checksum && defined $db; - my $usage; - if ( exists $self->{usage}->{$db} # Don't auto-vivify - && exists $self->{usage}->{$db}->{$checksum} ) - { - $usage = $self->{usage}->{$db}->{$checksum}; - } - PTDEBUG && _d("Usage for", - (length $checksum <= 100 ? $checksum : substr($checksum, 0, 100) . 
"..."), - "on", $db, ":", Dumper($usage)); - return $usage; -} - -sub save_usage_for { - my ( $self, $checksum, $db, $usage ) = @_; - die "I need a checksum and db" unless defined $checksum && defined $db; - $self->{usage}->{$db}->{$checksum} = $usage; -} - -sub fingerprint { - my ( $self, %args ) = @_; - my @required_args = qw(explain); - foreach my $arg ( @required_args ) { - die "I need a $arg argument" unless defined $args{$arg}; - } - my ($explain) = @args{@required_args}; -} - -sub _d { - my ($package, undef, $line) = caller 0; - @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } - map { defined $_ ? $_ : 'undef' } - @_; - print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; -} - -1; -} -# ########################################################################### -# End ExplainAnalyzer package -# ########################################################################### - # ########################################################################### # Runtime package # This package is a copy without comments from the original. The original @@ -12913,6 +13481,7 @@ Transformers->import(qw(shorten micro_t percentage_of ts make_checksum any_unix_timestamp parse_timestamp unix_timestamp crc32)); use Percona::Toolkit; +use JSONReportFormatter; use constant PTDEBUG => $ENV{PTDEBUG} || 0; use sigtrap 'handler', \&sig_int, 'normal-signals'; @@ -13025,7 +13594,6 @@ sub main { # ######################################################################## # Set up for --explain # ######################################################################## - my $exa; if ( my $ep_dsn = $o->get('explain') ) { $ep_dbh = get_cxn( for => '--explain', @@ -13035,11 +13603,6 @@ sub main { opts => { AutoCommit => 1 }, ); $ep_dbh->{InactiveDestroy} = 1; # Don't die on fork(). - - $exa = new ExplainAnalyzer( - QueryRewriter => $qr, - QueryParser => $qp, - ); } # ######################################################################## @@ -13646,7 +14209,6 @@ sub main { files => \@read_files, Pipeline => $pipeline, QueryReview => $qv, - ExplainAnalyzer => $exa, %common_modules, ); } @@ -14194,19 +14756,18 @@ sub print_reports { $print_header = 1; } - my $qrf = new QueryReportFormatter( - dbh => $ep_dbh, - %args, - ); - # http://code.google.com/p/maatkit/issues/detail?id=1141 - $qrf->set_report_formatter( - report => 'profile', - formatter => new ReportFormatter ( - line_width => $o->get('explain') ? 82 : 74, - long_last_column => 1, - extend_right => 1, - ), + my $report_class = $o->get('output') =~ m/\Ajson\z/i + ? 'JSONReportFormatter' + : 'QueryReportFormatter'; + my $qrf = $report_class->new( + dbh => $ep_dbh, + QueryReview => $args{QueryReview}, + QueryRewriter => $args{QueryRewriter}, + OptionParser => $args{OptionParser}, + QueryParser => $args{QueryParser}, + Quoter => $args{Quoter}, ); + $qrf->print_reports( reports => \@reports, ea => $eas->[$i], @@ -14256,7 +14817,7 @@ sub print_reports { { name => 'Time', right_justify => 1 }, { name => 'Count', right_justify => 1 }, ); - $report->set_title('Pipeline profile'); + $report->title('Pipeline profile'); my $instrument = $pipeline->instrumentation; my $total_time = $instrument->{Pipeline}; foreach my $process_name ( $pipeline->processes() ) { @@ -15439,6 +16000,13 @@ seconds and which are seen at least 5 times, use the following argument: You can specify an --outliers option for each value in L<"--group-by">. + +=item --output + +type: string; default: query + +Type of report to use. Accepted values are C<"query"> and C<"json">. 
+ =item --password short form: -p; type: string From d1d1b04f6e6c2f4df8daa3ce9587552b35c44f08 Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Tue, 15 Jan 2013 10:29:17 -0300 Subject: [PATCH 13/34] Simplistic --resume implementation --- bin/pt-query-digest | 56 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 54 insertions(+), 2 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 318217c5..0a2bc528 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -11776,6 +11776,8 @@ use warnings FATAL => 'all'; use English qw(-no_match_vars); use constant PTDEBUG => $ENV{PTDEBUG} || 0; +use Fcntl qw(:seek); + sub new { my ( $class, %args ) = @_; my $self = { @@ -11818,6 +11820,9 @@ sub get_file_itr { } open my $fh, '<', $fn or warn "Cannot open $fn: $OS_ERROR"; if ( $fh ) { + if ( my $pos = $self->{resume}->{$fn} ) { + seek $fh, $pos, SEEK_SET; + } return ( $fh, $fn, -s $fn ); } } @@ -13473,6 +13478,7 @@ use Time::Local qw(timelocal); use Time::HiRes qw(time usleep); use List::Util qw(max); use POSIX qw(signal_h); +use File::Spec; use Data::Dumper; $Data::Dumper::Indent = 1; $OUTPUT_AUTOFLUSH = 1; @@ -13492,6 +13498,10 @@ my $ep_dbh; # For --explain my $ps_dbh; # For Processlist my $aux_dbh; # For --aux-dsn (--since/--until "MySQL expression") +my %resume; +my $save_resume = undef; +my $resume_file = File::Spec->catfile(File::Spec->tmpdir(), 'pt-query-digest-resume'); + sub main { local @ARGV = @_; # set global ARGV for this package $oktorun = 1; # reset between tests else pipeline won't run @@ -13577,6 +13587,8 @@ sub main { $o->usage_or_errors(); + $save_resume = $o->get('resume'); + # ######################################################################## # Common modules. # ####################################################################### @@ -13727,6 +13739,16 @@ sub main { } } + if ( $o->get('resume') ) { + if (open my $resume_fh, q{<}, $resume_file) { + while (my $line = <$resume_fh>) { + chomp $line; + my ($file, $pos) = $line =~ m/\A(.+)\t([0-9]+)\z/; + $resume{$file} = $pos; + } + } + } + # ######################################################################## # Create all the pipeline processes that do all the work: get input, # parse events, manage runtime, switch iterations, aggregate, etc. @@ -13775,7 +13797,7 @@ sub main { } # prep { # input - my $fi = new FileIterator(); + my $fi = new FileIterator(resume => \%resume); my $next_file = $fi->get_file_itr(@ARGV); my $input_fh; # the current input fh my $pr; # Progress obj for ^ @@ -13807,8 +13829,13 @@ sub main { else { $args->{next_event} = sub { return <$fh>; }; } + $args->{filename} = $filename; $args->{input_fh} = $fh; - $args->{tell} = sub { return tell $fh; }; + $args->{tell} = sub { + my $pos = tell $fh; + $args->{pos_for}->{$args->{filename}} = $pos; + return $pos; + }; $args->{more_events} = 1; # Reset in case we read two logs out of order by time. @@ -14313,6 +14340,10 @@ sub main { # we may just be between iters. 
$args->{Runtime}->reset(); $args->{time_left} = undef; + + if ( $args->{filename} ) { + $resume{$args->{filename}} = $args->{pos_for}->{$args->{filename}}; + } } # Continue the pipeline even if we reported and went to the next @@ -14693,6 +14724,8 @@ sub main { grep { $_ } ($qv_dbh, $qv_dbh2, $ps_dbh, $ep_dbh, $aux_dbh); + save_resume_data(); + return 0; } # End main() @@ -14835,9 +14868,23 @@ sub print_reports { return; } +sub save_resume_data { + return unless $save_resume; + return unless %resume; + if ( open my $resume_fh, q{>}, $resume_file ) { + while ( my ($k, $v) = each %resume ) { + print { $resume_fh } "$k\t$v\n"; + } + close $resume_fh; + } +} + # Catches signals so we can exit gracefully. sub sig_int { my ( $signal ) = @_; + + save_resume_data(); + if ( $oktorun ) { print STDERR "# Caught SIG$signal.\n"; $oktorun = 0; @@ -16134,6 +16181,11 @@ like: See L<"OUTPUT"> for more information. +=item --resume + +If enabled, the tool will save the furthest it got into the log before exiting; +Future runs on that log with --resume enabled will start from that position. + =item --review type: DSN From 1e8e8a8398ae586b32c1336c723910055280c55e Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Wed, 16 Jan 2013 07:56:04 -0300 Subject: [PATCH 14/34] Make Transformers work if called with use --- lib/Transformers.pm | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/lib/Transformers.pm b/lib/Transformers.pm index 00bd89f6..561111fd 100644 --- a/lib/Transformers.pm +++ b/lib/Transformers.pm @@ -31,24 +31,26 @@ use Time::Local qw(timegm timelocal); use Digest::MD5 qw(md5_hex); use B qw(); -require Exporter; -our @ISA = qw(Exporter); -our %EXPORT_TAGS = (); -our @EXPORT = (); -our @EXPORT_OK = qw( - micro_t - percentage_of - secs_to_time - time_to_secs - shorten - ts - parse_timestamp - unix_timestamp - any_unix_timestamp - make_checksum - crc32 - encode_json -); +BEGIN { + require Exporter; + our @ISA = qw(Exporter); + our %EXPORT_TAGS = (); + our @EXPORT = (); + our @EXPORT_OK = qw( + micro_t + percentage_of + secs_to_time + time_to_secs + shorten + ts + parse_timestamp + unix_timestamp + any_unix_timestamp + make_checksum + crc32 + encode_json + ); +} our $mysql_ts = qr/(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)(\.\d+)?/; our $proper_ts = qr/(\d\d\d\d)-(\d\d)-(\d\d)[T ](\d\d):(\d\d):(\d\d)(\.\d+)?/; From e8a8ebbcac5cfece5d0f95eed31a1674ef255f5b Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Wed, 16 Jan 2013 07:59:19 -0300 Subject: [PATCH 15/34] Changed the JSON output --- bin/pt-query-digest | 153 +++++++++++++++++++++---------------- lib/JSONReportFormatter.pm | 114 +++++++++++++++------------ 2 files changed, 152 insertions(+), 115 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 0a2bc528..0b1b71b5 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -2082,24 +2082,26 @@ use Time::Local qw(timegm timelocal); use Digest::MD5 qw(md5_hex); use B qw(); -require Exporter; -our @ISA = qw(Exporter); -our %EXPORT_TAGS = (); -our @EXPORT = (); -our @EXPORT_OK = qw( - micro_t - percentage_of - secs_to_time - time_to_secs - shorten - ts - parse_timestamp - unix_timestamp - any_unix_timestamp - make_checksum - crc32 - encode_json -); +BEGIN { + require Exporter; + our @ISA = qw(Exporter); + our %EXPORT_TAGS = (); + our @EXPORT = (); + our @EXPORT_OK = qw( + micro_t + percentage_of + secs_to_time + time_to_secs + shorten + ts + parse_timestamp + unix_timestamp + 
any_unix_timestamp + make_checksum + crc32 + encode_json + ); +} our $mysql_ts = qr/(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)(\.\d+)?/; our $proper_ts = qr/(\d\d\d\d)-(\d\d)-(\d\d)[T ](\d\d):(\d\d):(\d\d)(\.\d+)?/; @@ -7977,10 +7979,50 @@ package JSONReportFormatter; use Mo; use JSON; +use Transformers qw(make_checksum); + use constant PTDEBUG => $ENV{PTDEBUG} || 0; extends qw(QueryReportFormatter); +has history_metrics => ( + is => 'ro', + isa => 'HashRef', +); + +sub BUILDARGS { + my $class = shift; + my %orig_args = @_; + my $args = $class->SUPER::BUILDARGS(@_); + + my $o = $orig_args{OptionParser}; + + my $sql = $o->read_para_after( + __FILE__, qr/MAGIC_create_review_history/); + + my $pat = $o->read_para_after(__FILE__, qr/MAGIC_history_cols/); + $pat = qr/\ {3}(\w+?)_($pat)\s+/; + + my %metrics; + foreach my $sql_line (split /\n/, $sql) { + my ( $attr, $metric ) = $sql_line =~ $pat; + next unless $attr && $metric; + + $attr = ucfirst $attr if $attr =~ m/_/; + $attr = 'Filesort' if $attr eq 'filesort'; + + $attr =~ s/^Qc_hit/QC_Hit/; # Qc_hit is really QC_Hit + $attr =~ s/^Innodb/InnoDB/g; # Innodb is really InnoDB + $attr =~ s/_io_/_IO_/g; # io is really IO + + $metrics{$attr}{$metric} = 1; + } + + $args->{history_metrics} = \%metrics; + + return $args; +} + override [qw(rusage date hostname files header profile prepared)] => sub { return; }; @@ -7995,59 +8037,41 @@ override query_report => sub { foreach my $arg ( qw(ea worst orderby groupby) ) { die "I need a $arg argument" unless defined $arg; } - my $ea = $args{ea}; - my $groupby = $args{groupby}; - my $worst = $args{worst}; - my $q = $self->Quoter; - my $qv = $self->{QueryReview}; - my $qr = $self->{QueryRewriter}; + my $ea = $args{ea}; + my $worst = $args{worst}; - my $query_report_vals = $self->query_report_values(%args); + my $history_metrics = $self->history_metrics; + my @attribs = grep { $history_metrics->{$_} } @{$ea->get_attributes()}; - my $attribs = $self->sort_attribs( - ($args{select} ? 
$args{select} : $ea->get_attributes()), - $ea, - ); + my @queries; + foreach my $worst_info ( @$worst ) { + my $item = $worst_info->[0]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; - ITEM: - foreach my $vals ( @$query_report_vals ) { - my $item = $vals->{item}; - my $samp_query = $vals->{samp_query}; - $vals->{event_report} = $self->event_report( - %args, - item => $item, - sample => $ea->results->{samples}->{$item}, - rank => $vals->{rank}, - reason => $vals->{reason}, - attribs => $attribs, - db => $vals->{default_db}, - ); + my %metrics; + foreach my $attrib ( @attribs ) { + $metrics{$attrib} = $ea->metrics( + attrib => $attrib, + where => $item, + ); - if ( $groupby eq 'fingerprint' ) { - if ( $item =~ m/^(?:[\(\s]*select|insert|replace)/ ) { - if ( $item !~ m/^(?:insert|replace)/ ) { # No EXPLAIN - $vals->{for_explain} = "EXPLAIN /*!50100 PARTITIONS*/\n$samp_query\\G\n"; - $vals->{explain_report} = $self->explain_report($samp_query, $vals->{default_db}); - } - } - else { - my $converted = $qr->convert_to_select($samp_query); - if ( $converted - && $converted =~ m/^[\(\s]*select/i ) { - $vals->{for_explain} = "EXPLAIN /*!50100 PARTITIONS*/\n$converted\\G\n"; - } - } - } - else { - if ( $groupby eq 'tables' ) { - my ( $db, $tbl ) = $q->split_unquote($item); - $vals->{tables_report} = $self->tables_report([$db, $tbl]); + my $needed_metrics = $history_metrics->{$attrib}; + for my $key ( keys %{$metrics{$attrib}} ) { + delete $metrics{$attrib}{$key} + unless $needed_metrics->{$key}; } } + + push @queries, { + sample => $sample, + checksum => make_checksum($item), + %metrics + }; } - return encode_json($query_report_vals) . "\n"; + return encode_json(\@queries) . "\n"; }; 1; @@ -11776,8 +11800,6 @@ use warnings FATAL => 'all'; use English qw(-no_match_vars); use constant PTDEBUG => $ENV{PTDEBUG} || 0; -use Fcntl qw(:seek); - sub new { my ( $class, %args ) = @_; my $self = { @@ -11820,9 +11842,6 @@ sub get_file_itr { } open my $fh, '<', $fn or warn "Cannot open $fn: $OS_ERROR"; if ( $fh ) { - if ( my $pos = $self->{resume}->{$fn} ) { - seek $fh, $pos, SEEK_SET; - } return ( $fh, $fn, -s $fn ); } } diff --git a/lib/JSONReportFormatter.pm b/lib/JSONReportFormatter.pm index af3e2546..bac1194c 100644 --- a/lib/JSONReportFormatter.pm +++ b/lib/JSONReportFormatter.pm @@ -3,10 +3,50 @@ package JSONReportFormatter; use Mo; use JSON; +use Transformers qw(make_checksum); + use constant PTDEBUG => $ENV{PTDEBUG} || 0; extends qw(QueryReportFormatter); +has history_metrics => ( + is => 'ro', + isa => 'HashRef', +); + +sub BUILDARGS { + my $class = shift; + my %orig_args = @_; + my $args = $class->SUPER::BUILDARGS(@_); + + my $o = $orig_args{OptionParser}; + + my $sql = $o->read_para_after( + __FILE__, qr/MAGIC_create_review_history/); + + my $pat = $o->read_para_after(__FILE__, qr/MAGIC_history_cols/); + $pat = qr/\ {3}(\w+?)_($pat)\s+/; + + my %metrics; + foreach my $sql_line (split /\n/, $sql) { + my ( $attr, $metric ) = $sql_line =~ $pat; + next unless $attr && $metric; + + $attr = ucfirst $attr if $attr =~ m/_/; + $attr = 'Filesort' if $attr eq 'filesort'; + + $attr =~ s/^Qc_hit/QC_Hit/; # Qc_hit is really QC_Hit + $attr =~ s/^Innodb/InnoDB/g; # Innodb is really InnoDB + $attr =~ s/_io_/_IO_/g; # io is really IO + + $metrics{$attr}{$metric} = 1; + } + + $args->{history_metrics} = \%metrics; + + return $args; +} + override [qw(rusage date hostname files header profile prepared)] => sub { return; }; @@ -21,63 +61,41 @@ override query_report => sub { 
foreach my $arg ( qw(ea worst orderby groupby) ) { die "I need a $arg argument" unless defined $arg; } - my $ea = $args{ea}; - my $groupby = $args{groupby}; - my $worst = $args{worst}; - my $q = $self->Quoter; - my $qv = $self->{QueryReview}; - my $qr = $self->{QueryRewriter}; + my $ea = $args{ea}; + my $worst = $args{worst}; - my $query_report_vals = $self->query_report_values(%args); + my $history_metrics = $self->history_metrics; + my @attribs = grep { $history_metrics->{$_} } @{$ea->get_attributes()}; - # Sort the attributes, removing any hidden attributes. - my $attribs = $self->sort_attribs( - ($args{select} ? $args{select} : $ea->get_attributes()), - $ea, - ); + my @queries; + foreach my $worst_info ( @$worst ) { + my $item = $worst_info->[0]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; - ITEM: - foreach my $vals ( @$query_report_vals ) { - my $item = $vals->{item}; - my $samp_query = $vals->{samp_query}; - # ############################################################### - # Print the standard query analysis report. - # ############################################################### - $vals->{event_report} = $self->event_report( - %args, - item => $item, - sample => $ea->results->{samples}->{$item}, - rank => $vals->{rank}, - reason => $vals->{reason}, - attribs => $attribs, - db => $vals->{default_db}, - ); + my %metrics; + foreach my $attrib ( @attribs ) { + $metrics{$attrib} = $ea->metrics( + attrib => $attrib, + where => $item, + ); - if ( $groupby eq 'fingerprint' ) { - if ( $item =~ m/^(?:[\(\s]*select|insert|replace)/ ) { - if ( $item !~ m/^(?:insert|replace)/ ) { # No EXPLAIN - $vals->{for_explain} = "EXPLAIN /*!50100 PARTITIONS*/\n$samp_query\\G\n"; - $vals->{explain_report} = $self->explain_report($samp_query, $vals->{default_db}); - } - } - else { - my $converted = $qr->convert_to_select($samp_query); - if ( $converted - && $converted =~ m/^[\(\s]*select/i ) { - $vals->{for_explain} = "EXPLAIN /*!50100 PARTITIONS*/\n$converted\\G\n"; - } - } - } - else { - if ( $groupby eq 'tables' ) { - my ( $db, $tbl ) = $q->split_unquote($item); - $vals->{tables_report} = $self->tables_report([$db, $tbl]); + my $needed_metrics = $history_metrics->{$attrib}; + for my $key ( keys %{$metrics{$attrib}} ) { + delete $metrics{$attrib}{$key} + unless $needed_metrics->{$key}; } } + + push @queries, { + sample => $sample, + checksum => make_checksum($item), + %metrics + }; } - return encode_json($query_report_vals) . "\n"; + return encode_json(\@queries) . 
"\n"; }; 1; From 1f7e1c12e2489d7b6271fe4dc350e4ee25f1f42b Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Wed, 16 Jan 2013 16:46:40 -0300 Subject: [PATCH 16/34] lib/JSONReportFormatter.pm: Change the json output struct to the new spec --- bin/pt-query-digest | 97 +++++++++++++++++--------------------- lib/JSONReportFormatter.pm | 97 +++++++++++++++++--------------------- 2 files changed, 86 insertions(+), 108 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 0b1b71b5..cd735aa4 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -7979,50 +7979,14 @@ package JSONReportFormatter; use Mo; use JSON; -use Transformers qw(make_checksum); +use List::Util qw(sum); + +use Transformers qw(make_checksum parse_timestamp); use constant PTDEBUG => $ENV{PTDEBUG} || 0; extends qw(QueryReportFormatter); -has history_metrics => ( - is => 'ro', - isa => 'HashRef', -); - -sub BUILDARGS { - my $class = shift; - my %orig_args = @_; - my $args = $class->SUPER::BUILDARGS(@_); - - my $o = $orig_args{OptionParser}; - - my $sql = $o->read_para_after( - __FILE__, qr/MAGIC_create_review_history/); - - my $pat = $o->read_para_after(__FILE__, qr/MAGIC_history_cols/); - $pat = qr/\ {3}(\w+?)_($pat)\s+/; - - my %metrics; - foreach my $sql_line (split /\n/, $sql) { - my ( $attr, $metric ) = $sql_line =~ $pat; - next unless $attr && $metric; - - $attr = ucfirst $attr if $attr =~ m/_/; - $attr = 'Filesort' if $attr eq 'filesort'; - - $attr =~ s/^Qc_hit/QC_Hit/; # Qc_hit is really QC_Hit - $attr =~ s/^Innodb/InnoDB/g; # Innodb is really InnoDB - $attr =~ s/_io_/_IO_/g; # io is really IO - - $metrics{$attr}{$metric} = 1; - } - - $args->{history_metrics} = \%metrics; - - return $args; -} - override [qw(rusage date hostname files header profile prepared)] => sub { return; }; @@ -8041,33 +8005,58 @@ override query_report => sub { my $ea = $args{ea}; my $worst = $args{worst}; - my $history_metrics = $self->history_metrics; - my @attribs = grep { $history_metrics->{$_} } @{$ea->get_attributes()}; - + my @attribs = @{$ea->get_attributes()}; + my @queries; foreach my $worst_info ( @$worst ) { - my $item = $worst_info->[0]; - my $stats = $ea->results->{classes}->{$item}; - my $sample = $ea->results->{samples}->{$item}; + my $item = $worst_info->[0]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; + my $all_log_pos = $ea->{result_classes}->{$item}->{pos_in_log}->{all}; + my $times_seen = sum values %$all_log_pos; + + my %class = ( + sample => $sample->{arg}, + fingerprint => $item, + checksum => make_checksum($item), + cnt => $times_seen, + ); + my %metrics; foreach my $attrib ( @attribs ) { $metrics{$attrib} = $ea->metrics( attrib => $attrib, where => $item, ); - - my $needed_metrics = $history_metrics->{$attrib}; - for my $key ( keys %{$metrics{$attrib}} ) { - delete $metrics{$attrib}{$key} - unless $needed_metrics->{$key}; - } } + foreach my $attrib ( keys %metrics ) { + if ( ! 
grep { $_ } values %{$metrics{$attrib}} ) { + delete $metrics{$attrib}; + next; + } + + if ($attrib eq 'ts') { + my $ts = delete $metrics{ts}; + foreach my $thing ( qw(min max) ) { + next unless defined $ts && defined $ts->{$thing}; + $ts->{$thing} = parse_timestamp($ts->{$thing}); + } + $class{ts_min} = $ts->{min}; + $class{ts_max} = $ts->{max}; + } + elsif ( ($ea->{type_for}->{$attrib} || '') eq 'num' ) { + for my $value ( values %{$metrics{$attrib}} ) { + next unless $value; + $value = sprintf '%.7f', $value; + } + } + } + push @queries, { - sample => $sample, - checksum => make_checksum($item), - %metrics + class => \%class, + attributes => \%metrics, }; } diff --git a/lib/JSONReportFormatter.pm b/lib/JSONReportFormatter.pm index bac1194c..e2cc0a31 100644 --- a/lib/JSONReportFormatter.pm +++ b/lib/JSONReportFormatter.pm @@ -3,50 +3,14 @@ package JSONReportFormatter; use Mo; use JSON; -use Transformers qw(make_checksum); +use List::Util qw(sum); + +use Transformers qw(make_checksum parse_timestamp); use constant PTDEBUG => $ENV{PTDEBUG} || 0; extends qw(QueryReportFormatter); -has history_metrics => ( - is => 'ro', - isa => 'HashRef', -); - -sub BUILDARGS { - my $class = shift; - my %orig_args = @_; - my $args = $class->SUPER::BUILDARGS(@_); - - my $o = $orig_args{OptionParser}; - - my $sql = $o->read_para_after( - __FILE__, qr/MAGIC_create_review_history/); - - my $pat = $o->read_para_after(__FILE__, qr/MAGIC_history_cols/); - $pat = qr/\ {3}(\w+?)_($pat)\s+/; - - my %metrics; - foreach my $sql_line (split /\n/, $sql) { - my ( $attr, $metric ) = $sql_line =~ $pat; - next unless $attr && $metric; - - $attr = ucfirst $attr if $attr =~ m/_/; - $attr = 'Filesort' if $attr eq 'filesort'; - - $attr =~ s/^Qc_hit/QC_Hit/; # Qc_hit is really QC_Hit - $attr =~ s/^Innodb/InnoDB/g; # Innodb is really InnoDB - $attr =~ s/_io_/_IO_/g; # io is really IO - - $metrics{$attr}{$metric} = 1; - } - - $args->{history_metrics} = \%metrics; - - return $args; -} - override [qw(rusage date hostname files header profile prepared)] => sub { return; }; @@ -65,33 +29,58 @@ override query_report => sub { my $ea = $args{ea}; my $worst = $args{worst}; - my $history_metrics = $self->history_metrics; - my @attribs = grep { $history_metrics->{$_} } @{$ea->get_attributes()}; - + my @attribs = @{$ea->get_attributes()}; + my @queries; foreach my $worst_info ( @$worst ) { - my $item = $worst_info->[0]; - my $stats = $ea->results->{classes}->{$item}; - my $sample = $ea->results->{samples}->{$item}; + my $item = $worst_info->[0]; + my $stats = $ea->results->{classes}->{$item}; + my $sample = $ea->results->{samples}->{$item}; + my $all_log_pos = $ea->{result_classes}->{$item}->{pos_in_log}->{all}; + my $times_seen = sum values %$all_log_pos; + + my %class = ( + sample => $sample->{arg}, + fingerprint => $item, + checksum => make_checksum($item), + cnt => $times_seen, + ); + my %metrics; foreach my $attrib ( @attribs ) { $metrics{$attrib} = $ea->metrics( attrib => $attrib, where => $item, ); - - my $needed_metrics = $history_metrics->{$attrib}; - for my $key ( keys %{$metrics{$attrib}} ) { - delete $metrics{$attrib}{$key} - unless $needed_metrics->{$key}; - } } + foreach my $attrib ( keys %metrics ) { + if ( ! 
grep { $_ } values %{$metrics{$attrib}} ) { + delete $metrics{$attrib}; + next; + } + + if ($attrib eq 'ts') { + my $ts = delete $metrics{ts}; + foreach my $thing ( qw(min max) ) { + next unless defined $ts && defined $ts->{$thing}; + $ts->{$thing} = parse_timestamp($ts->{$thing}); + } + $class{ts_min} = $ts->{min}; + $class{ts_max} = $ts->{max}; + } + elsif ( ($ea->{type_for}->{$attrib} || '') eq 'num' ) { + for my $value ( values %{$metrics{$attrib}} ) { + next unless $value; + $value = sprintf '%.7f', $value; + } + } + } + push @queries, { - sample => $sample, - checksum => make_checksum($item), - %metrics + class => \%class, + attributes => \%metrics, }; } From 27e6d025f8b9383be00efa19598d0c03c3756b8a Mon Sep 17 00:00:00 2001 From: Daniel Nichter Date: Wed, 16 Jan 2013 13:30:50 -0700 Subject: [PATCH 17/34] Rewrite and clean up the docs to reflect maybe the new usage. --- bin/pt-query-digest | 764 +++++++++++++++++++------------------------- 1 file changed, 336 insertions(+), 428 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index cd735aa4..ae7b93ba 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -15167,37 +15167,29 @@ if ( !caller ) { exit main(@ARGV); } =head1 NAME -pt-query-digest - Analyze query execution logs and generate a query report, filter, replay, or transform queries for MySQL, PostgreSQL, memcached, and more. +pt-query-digest - Analyze and report on MySQL queries. =head1 SYNOPSIS -Usage: pt-query-digest [OPTION...] [FILE] +Usage: pt-query-digest [OPTIONS] [DSN] [FILES] -pt-query-digest parses and analyzes MySQL log files. With no FILE, or when -FILE is -, it read standard input. +pt-query-digest analyzes MySQL queries from logs, C, and +L. By default, queries are grouped by fingerprint and reported +in descending order of total execution time. Specifying L<"--review"> and +a C saves query data in a table for later analysis and historical +trending. The tool reads C if no C are given. -Analyze, aggregate, and report on a slow query log: +Analyze and report on queries in C: - pt-query-digest /path/to/slow.log + pt-query-digest slow.log -Review a slow log, saving results to the test.query_review table in a MySQL -server running on host1. See L<"--review"> for more on reviewing queries: +Save query data in MySQL on host1: - pt-query-digest --review h=host1,D=test,t=query_review /path/to/slow.log + pt-query-digest slow.log h=host1 --review -Print the structure of events so you can construct a complex L<"--filter">: +Get queries from C on host1: - pt-query-digest /path/to/slow.log --no-report \ - --filter 'print Dumper($event)' - -Watch SHOW FULL PROCESSLIST and output a log in slow query log format: - - pt-query-digest --processlist h=host1 --print --no-report - -The default aggregation and analysis is CPU and memory intensive. Disable it if -you don't need the default report: - - pt-query-digest --no-report + pt-query-digest --processlist h=host1 =head1 RISKS @@ -15500,10 +15492,6 @@ example, You can see how useful this meta-data is -- as you analyze your queries, you get your comments integrated right into the report. -If you add the L<"--review-history"> option, it will also store information into -a separate database table, so you can keep historical trending information on -classes of queries. - =back =head1 FINGERPRINTS @@ -15606,9 +15594,6 @@ Collapse multiple identical UNION queries into a single one. =head1 OPTIONS -DSN values in L<"--review-history"> default to values in L<"--review"> if COPY -is yes. 
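To make the shape of the new JSON output from [PATCH 16/34] above easier to
follow: each element pushed onto @queries carries a class block (query
identity and counts) and an attributes block (per-attribute metrics). A rough
sketch of one such element, with made-up values and only a couple of
attributes and metric keys shown, might look like this before encode_json()
serializes it:

  # Illustrative only: the approximate shape of one @queries element built by
  # the new query_report override.  All values are invented for this sketch.
  my $query_class = {
     class => {
        sample      => "SELECT * FROM t WHERE id=1",   # worst sample text
        fingerprint => "select * from t where id=?",   # the group-by item
        checksum    => "3A99CC42AE9D6870",             # make_checksum($item)
        cnt         => 3,                              # sum of pos_in_log counts
        ts_min      => "2013-01-16 10:00:01",          # parse_timestamp() of ts min
        ts_max      => "2013-01-16 10:05:42",          # parse_timestamp() of ts max
     },
     attributes => {
        Query_time => {                                # numeric metrics get '%.7f'
           sum => "0.0003000", min => "0.0001000",
           max => "0.0002000", avg => "0.0001000",
        },
        Rows_sent  => { sum => 3, min => 1, max => 1, avg => 1 },
     },
  };
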
- This tool accepts additional command-line arguments. Refer to the L<"SYNOPSIS"> and usage information for details. @@ -15644,19 +15629,6 @@ This option deals with bugs in slow-logging functionality that causes large values for attributes. If the attribute's value is bigger than this, the last-seen value for that class of query is used instead. -=item --aux-dsn - -type: DSN - -Auxiliary DSN used for special options. - -The following options may require a DSN even when only parsing a slow log file: - - * --since - * --until - -See each option for why it might require a DSN. - =item --charset short form: -A; type: string @@ -15695,16 +15667,20 @@ default: yes Continue parsing even if there is an error. -=item --create-review-history-table +=item --[no]create-history-table -Create the L<"--review-history"> table if it does not exist. +default: yes -This option causes the table specified by L<"--review-history"> to be created -with the default structure shown in the documentation for that option. +Create the L<"--review"> tables if they do not exist. -=item --create-review-table +This option causes the table specified by L<"--review"> to be created with the +default structure shown in the documentation for that option. -Create the L<"--review"> table if it does not exist. +=item --[no]create-review-table + +default: yes + +Create the L<"--review"> tables if they do not exist. This option causes the table specified by L<"--review"> to be created with the default structure shown in the documentation for that option. @@ -15925,324 +15901,11 @@ L<"ATTRIBUTES">). Show help and exit. -=item --host +=item --history -short form: -h; type: string +type: string; default: percona_schema.query_history -Connect to host. - -=item --ignore-attributes - -type: array; default: arg, cmd, insert_id, ip, port, Thread_id, timestamp, exptime, flags, key, res, val, server_id, offset, end_log_pos, Xid - -Do not aggregate these attributes when auto-detecting L<"--select">. - -If you do not specify L<"--select"> then pt-query-digest auto-detects and -aggregates every attribute that it finds in the slow log. Some attributes, -however, should not be aggregated. This option allows you to specify a list -of attributes to ignore. This only works when no explicit L<"--select"> is -given. - -=item --inherit-attributes - -type: array; default: db,ts - -If missing, inherit these attributes from the last event that had them. - -This option sets which attributes are inherited or carried forward to events -which do not have them. For example, if one event has the db attribute equal -to "foo", but the next event doesn't have the db attribute, then it inherits -"foo" for its db attribute. - -=item --interval - -type: float; default: .1 - -How frequently to poll the processlist, in seconds. - -=item --iterations - -type: int; default: 1 - -How many times to iterate through the collect-and-report cycle. If 0, iterate -to infinity. Each iteration runs for L<"--run-time"> amount of time. An -iteration is usually determined by an amount of time and a report is printed -when that amount of time elapses. With L<"--run-time-mode"> C, -an interval is instead determined by the interval time you specify with -L<"--run-time">. See L<"--run-time"> and L<"--run-time-mode"> for more -information. - -=item --limit - -type: Array; default: 95%:20 - -Limit output to the given percentage or count. - -If the argument is an integer, report only the top N worst queries. 
If the -argument is an integer followed by the C<%> sign, report that percentage of the -worst queries. If the percentage is followed by a colon and another integer, -report the top percentage or the number specified by that integer, whichever -comes first. - -The value is actually a comma-separated array of values, one for each item in -L<"--group-by">. If you don't specify a value for any of those items, the -default is the top 95%. - -See also L<"--outliers">. - -=item --log - -type: string - -Print all output to this file when daemonized. - -=item --order-by - -type: Array; default: Query_time:sum - -Sort events by this attribute and aggregate function. - -This is a comma-separated list of order-by expressions, one for each -L<"--group-by"> attribute. The default C is used for -L<"--group-by"> attributes without explicitly given L<"--order-by"> attributes -(that is, if you specify more L<"--group-by"> attributes than corresponding -L<"--order-by"> attributes). The syntax is C. See -L<"ATTRIBUTES"> for valid attributes. Valid aggregates are: - - Aggregate Meaning - ========= ============================ - sum Sum/total attribute value - min Minimum attribute value - max Maximum attribute value - cnt Frequency/count of the query - -For example, the default C means that queries in the -query analysis report will be ordered (sorted) by their total query execution -time ("Exec time"). C orders the queries by their -maximum query execution time, so the query with the single largest -C will be list first. C refers more to the frequency -of the query as a whole, how often it appears; "Count" is its corresponding -line in the query analysis report. So any attribute and C should yield -the same report wherein queries are sorted by the number of times they -appear. - -When parsing general logs (L<"--type"> C), the default L<"--order-by"> -becomes C. General logs do not report query times so only -the C aggregate makes sense because all query times are zero. - -If you specify an attribute that doesn't exist in the events, then -pt-query-digest falls back to the default C and prints a notice -at the beginning of the report for each query class. You can create attributes -with L<"--filter"> and order by them; see L<"ATTRIBUTES"> for an example. - -=item --outliers - -type: array; default: Query_time:1:10 - -Report outliers by attribute:percentile:count. - -The syntax of this option is a comma-separated list of colon-delimited strings. -The first field is the attribute by which an outlier is defined. The second is -a number that is compared to the attribute's 95th percentile. The third is -optional, and is compared to the attribute's cnt aggregate. Queries that pass -this specification are added to the report, regardless of any limits you -specified in L<"--limit">. - -For example, to report queries whose 95th percentile Query_time is at least 60 -seconds and which are seen at least 5 times, use the following argument: - - --outliers Query_time:60:5 - -You can specify an --outliers option for each value in L<"--group-by">. - - -=item --output - -type: string; default: query - -Type of report to use. Accepted values are C<"query"> and C<"json">. - -=item --password - -short form: -p; type: string - -Password to use when connecting. - -=item --pid - -type: string - -Create the given PID file when daemonized. The file contains the process -ID of the daemonized instance. The PID file is removed when the -daemonized instance exits. 
The program checks for the existence of the -PID file when starting; if it exists and the process with the matching PID -exists, the program exits. - -=item --port - -short form: -P; type: int - -Port number to use for connection. - -=item --print - -Print log events to STDOUT in standard slow-query-log format. - -=item --print-iterations - -Print the start time for each L<"--iterations">. - -This option causes a line like the following to be printed at the start -of each L<"--iterations"> report: - - # Iteration 2 started at 2009-11-24T14:39:48.345780 - -This line will print even if C<--no-report> is specified. If C<--iterations 0> -is specified, each iteration number will be C<0>. - -=item --processlist - -type: DSN - -Poll this DSN's processlist for queries, with L<"--interval"> sleep between. - -If the connection fails, pt-query-digest tries to reopen it once per second. - -=item --progress - -type: array; default: time,30 - -Print progress reports to STDERR. The value is a comma-separated list with two -parts. The first part can be percentage, time, or iterations; the second part -specifies how often an update should be printed, in percentage, seconds, or -number of iterations. - -=item --read-timeout - -type: time; default: 0 - -Wait this long for an event from the input; 0 to wait forever. - -This option sets the maximum time to wait for an event from the input. It -applies to all types of input except L<"--processlist">. If an -event is not received after the specified time, the script stops reading the -input and prints its reports. If L<"--iterations"> is 0 or greater than -1, the next iteration will begin, else the script will exit. - -This option requires the Perl POSIX module. - -=item --[no]report - -default: yes - -Print out reports on the aggregate results from L<"--group-by">. - -This is the standard slow-log analysis functionality. See L<"OUTPUT"> for the -description of what this does and what the results look like. - -=item --report-all - -Include all queries, even if they have already been reviewed. - -=item --report-format - -type: Array; default: rusage,date,hostname,files,header,profile,query_report,prepared - -Print these sections of the query analysis report. - - SECTION PRINTS - ============ ====================================================== - rusage CPU times and memory usage reported by ps - date Current local date and time - hostname Hostname of machine on which pt-query-digest was run - files Input files read/parse - header Summary of the entire analysis run - profile Compact table of queries for an overview of the report - query_report Detailed information about each unique query - prepared Prepared statements - -The sections are printed in the order specified. The rusage, date, files and -header sections are grouped together if specified together; other sections are -separated by blank lines. - -See L<"OUTPUT"> for more information on the various parts of the query report. - -=item --report-histogram - -type: string; default: Query_time - -Chart the distribution of this attribute's values. - -The distribution chart is limited to time-based attributes, so charting -C, for example, will produce a useless chart. Charts look -like: - - # Query_time distribution - # 1us - # 10us - # 100us - # 1ms - # 10ms ################################ - # 100ms ################################################################ - # 1s ######## - # 10s+ - -See L<"OUTPUT"> for more information. 
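Purely for illustration (this is not the tool's implementation): the
distribution chart in the item above groups values into base-10 time buckets
from 1us to 10s+. A minimal Perl sketch of that bucketing could look like
this:

  # Hypothetical sketch of the base-10 bucketing shown in the chart above.
  use strict;
  use warnings;
  use POSIX qw(floor);

  my @buckets = qw(1us 10us 100us 1ms 10ms 100ms 1s 10s+);

  sub bucket_for {
     my ($seconds) = @_;
     return $buckets[0] if $seconds < 0.000001;          # anything under 1us
     my $idx = floor( log($seconds * 1_000_000) / log(10) );
     $idx = $#buckets if $idx > $#buckets;               # clamp to the 10s+ bucket
     return $buckets[$idx];
  }

  print bucket_for(0.05), "\n";   # prints "10ms" (a 50 ms query)
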
- -=item --resume - -If enabled, the tool will save the furthest it got into the log before exiting; -Future runs on that log with --resume enabled will start from that position. - -=item --review - -type: DSN - -Store a sample of each class of query in this DSN. - -The argument specifies a table to store all unique query fingerprints in. The -table must have at least the following columns. You can add more columns for -your own special purposes, but they won't be used by pt-query-digest. The -following CREATE TABLE definition is also used for L<"--create-review-table">. -MAGIC_create_review: - - CREATE TABLE query_review ( - checksum BIGINT UNSIGNED NOT NULL PRIMARY KEY, - fingerprint TEXT NOT NULL, - sample TEXT NOT NULL, - first_seen DATETIME, - last_seen DATETIME, - reviewed_by VARCHAR(20), - reviewed_on DATETIME, - comments TEXT - ) - -The columns are as follows: - - COLUMN MEANING - =========== =============== - checksum A 64-bit checksum of the query fingerprint - fingerprint The abstracted version of the query; its primary key - sample The query text of a sample of the class of queries - first_seen The smallest timestamp of this class of queries - last_seen The largest timestamp of this class of queries - reviewed_by Initially NULL; if set, query is skipped thereafter - reviewed_on Initially NULL; not assigned any special meaning - comments Initially NULL; not assigned any special meaning - -Note that the C column is the true primary key for a class of -queries. The C is just a cryptographic hash of this value, which -provides a shorter value that is very likely to also be unique. - -After parsing and aggregating events, your table should contain a row for each -fingerprint. This option depends on C<--group-by fingerprint> (which is the -default). It will not work otherwise. - -=item --review-history - -type: DSN - -The table in which to store historical values for review trend analysis. +The table in which to store query data for historical trend analysis. Each time you review queries with L<"--review">, pt-query-digest will save information into this table so you can see how classes of queries have changed @@ -16280,7 +15943,7 @@ you could also just add a ts_min column and make it a DATE type, so you'd get one row per class of queries per day. The default table structure follows. The following MAGIC_create_review_history -table definition is used for L<"--create-review-history-table">: +table definition is used for L<"--create-review-tables">: CREATE TABLE query_review_history ( checksum BIGINT UNSIGNED NOT NULL, @@ -16386,6 +16049,312 @@ table definition is used for L<"--create-review-history-table">: Note that we store the count (cnt) for the ts attribute only; it will be redundant to store this for other attributes. +=item --host + +short form: -h; type: string + +Connect to host. + +=item --ignore-attributes + +type: array; default: arg, cmd, insert_id, ip, port, Thread_id, timestamp, exptime, flags, key, res, val, server_id, offset, end_log_pos, Xid + +Do not aggregate these attributes when auto-detecting L<"--select">. + +If you do not specify L<"--select"> then pt-query-digest auto-detects and +aggregates every attribute that it finds in the slow log. Some attributes, +however, should not be aggregated. This option allows you to specify a list +of attributes to ignore. This only works when no explicit L<"--select"> is +given. + +=item --inherit-attributes + +type: array; default: db,ts + +If missing, inherit these attributes from the last event that had them. 
+ +This option sets which attributes are inherited or carried forward to events +which do not have them. For example, if one event has the db attribute equal +to "foo", but the next event doesn't have the db attribute, then it inherits +"foo" for its db attribute. + +=item --interval + +type: float; default: .1 + +How frequently to poll the L<"--processlist">, in seconds. + +=item --iterations + +type: int; default: 1 + +How many times to iterate through the collect-and-report cycle. If 0, iterate +to infinity. Each iteration runs for L<"--run-time"> amount of time. An +iteration is usually determined by an amount of time and a report is printed +when that amount of time elapses. With L<"--run-time-mode"> C, +an interval is instead determined by the interval time you specify with +L<"--run-time">. See L<"--run-time"> and L<"--run-time-mode"> for more +information. + +=item --limit + +type: Array; default: 95%:20 + +Limit output to the given percentage or count. + +If the argument is an integer, report only the top N worst queries. If the +argument is an integer followed by the C<%> sign, report that percentage of the +worst queries. If the percentage is followed by a colon and another integer, +report the top percentage or the number specified by that integer, whichever +comes first. + +The value is actually a comma-separated array of values, one for each item in +L<"--group-by">. If you don't specify a value for any of those items, the +default is the top 95%. + +See also L<"--outliers">. + +=item --log + +type: string + +Print all output to this file when daemonized. + +=item --order-by + +type: Array; default: Query_time:sum + +Sort events by this attribute and aggregate function. + +This is a comma-separated list of order-by expressions, one for each +L<"--group-by"> attribute. The default C is used for +L<"--group-by"> attributes without explicitly given L<"--order-by"> attributes +(that is, if you specify more L<"--group-by"> attributes than corresponding +L<"--order-by"> attributes). The syntax is C. See +L<"ATTRIBUTES"> for valid attributes. Valid aggregates are: + + Aggregate Meaning + ========= ============================ + sum Sum/total attribute value + min Minimum attribute value + max Maximum attribute value + cnt Frequency/count of the query + +For example, the default C means that queries in the +query analysis report will be ordered (sorted) by their total query execution +time ("Exec time"). C orders the queries by their +maximum query execution time, so the query with the single largest +C will be list first. C refers more to the frequency +of the query as a whole, how often it appears; "Count" is its corresponding +line in the query analysis report. So any attribute and C should yield +the same report wherein queries are sorted by the number of times they +appear. + +When parsing general logs (L<"--type"> C), the default L<"--order-by"> +becomes C. General logs do not report query times so only +the C aggregate makes sense because all query times are zero. + +If you specify an attribute that doesn't exist in the events, then +pt-query-digest falls back to the default C and prints a notice +at the beginning of the report for each query class. You can create attributes +with L<"--filter"> and order by them; see L<"ATTRIBUTES"> for an example. + +=item --outliers + +type: array; default: Query_time:1:10 + +Report outliers by attribute:percentile:count. + +The syntax of this option is a comma-separated list of colon-delimited strings. 
+The first field is the attribute by which an outlier is defined. The second is +a number that is compared to the attribute's 95th percentile. The third is +optional, and is compared to the attribute's cnt aggregate. Queries that pass +this specification are added to the report, regardless of any limits you +specified in L<"--limit">. + +For example, to report queries whose 95th percentile Query_time is at least 60 +seconds and which are seen at least 5 times, use the following argument: + + --outliers Query_time:60:5 + +You can specify an --outliers option for each value in L<"--group-by">. + +=item --output + +type: string; default: report + +Type of report to print to C. + +Accepted values are C, C, C, and C. + +C is useful with L<"--review"> and L<"--history"> when you only want +to save the query data and not have a printed reported. + +=item --password + +short form: -p; type: string + +Password to use when connecting. + +=item --pid + +type: string + +Create the given PID file when daemonized. The file contains the process +ID of the daemonized instance. The PID file is removed when the +daemonized instance exits. The program checks for the existence of the +PID file when starting; if it exists and the process with the matching PID +exists, the program exits. + +=item --port + +short form: -P; type: int + +Port number to use for connection. + +=item --print-iterations + +Print the start time for each L<"--iterations">. + +This option causes a line like the following to be printed at the start +of each L<"--iterations"> report: + + # Iteration 2 started at 2009-11-24T14:39:48.345780 + +This line will print even if C<--no-report> is specified. If C<--iterations 0> +is specified, each iteration number will be C<0>. + +=item --processlist + +type: DSN + +Poll this DSN's processlist for queries, with L<"--interval"> sleep between. + +If the connection fails, pt-query-digest tries to reopen it once per second. + +The DSN inherits from the DSN given on the command line, if any. + +=item --progress + +type: array; default: time,30 + +Print progress reports to STDERR. The value is a comma-separated list with two +parts. The first part can be percentage, time, or iterations; the second part +specifies how often an update should be printed, in percentage, seconds, or +number of iterations. + +=item --read-timeout + +type: time; default: 0 + +Wait this long for an event from the input; 0 to wait forever. + +This option sets the maximum time to wait for an event from the input. It +applies to all types of input except L<"--processlist">. If an +event is not received after the specified time, the script stops reading the +input and prints its reports. If L<"--iterations"> is 0 or greater than +1, the next iteration will begin, else the script will exit. + +This option requires the Perl POSIX module. + +=item --report-all + +Include all queries, even if they have already been reviewed. + +=item --report-format + +type: Array; default: rusage,date,hostname,files,header,profile,query_report,prepared + +Print these sections of the query analysis report. 
+ + SECTION PRINTS + ============ ====================================================== + rusage CPU times and memory usage reported by ps + date Current local date and time + hostname Hostname of machine on which pt-query-digest was run + files Input files read/parse + header Summary of the entire analysis run + profile Compact table of queries for an overview of the report + query_report Detailed information about each unique query + prepared Prepared statements + +The sections are printed in the order specified. The rusage, date, files and +header sections are grouped together if specified together; other sections are +separated by blank lines. + +See L<"OUTPUT"> for more information on the various parts of the query report. + +=item --report-histogram + +type: string; default: Query_time + +Chart the distribution of this attribute's values. + +The distribution chart is limited to time-based attributes, so charting +C, for example, will produce a useless chart. Charts look +like: + + # Query_time distribution + # 1us + # 10us + # 100us + # 1ms + # 10ms ################################ + # 100ms ################################################################ + # 1s ######## + # 10s+ + +See L<"OUTPUT"> for more information. + +=item --resume + +If enabled, the tool will save the furthest it got into the log before exiting; +Future runs on that log with --resume enabled will start from that position. + +=item --review + +type: string; default: percona_schema.query_review + +Store a sample of each class of query in this DSN. + +The argument specifies a table to store all unique query fingerprints in. The +table must have at least the following columns. You can add more columns for +your own special purposes, but they won't be used by pt-query-digest. The +following CREATE TABLE definition is also used for L<"--create-review-tables">. +MAGIC_create_review: + + CREATE TABLE query_review ( + checksum BIGINT UNSIGNED NOT NULL PRIMARY KEY, + fingerprint TEXT NOT NULL, + sample TEXT NOT NULL, + first_seen DATETIME, + last_seen DATETIME, + reviewed_by VARCHAR(20), + reviewed_on DATETIME, + comments TEXT + ) + +The columns are as follows: + + COLUMN MEANING + =========== =============== + checksum A 64-bit checksum of the query fingerprint + fingerprint The abstracted version of the query; its primary key + sample The query text of a sample of the class of queries + first_seen The smallest timestamp of this class of queries + last_seen The largest timestamp of this class of queries + reviewed_by Initially NULL; if set, query is skipped thereafter + reviewed_on Initially NULL; not assigned any special meaning + comments Initially NULL; not assigned any special meaning + +Note that the C column is the true primary key for a class of +queries. The C is just a cryptographic hash of this value, which +provides a shorter value that is very likely to also be unique. + +After parsing and aggregating events, your table should contain a row for each +fingerprint. This option depends on C<--group-by fingerprint> (which is the +default). It will not work otherwise. + =item --run-time type: time @@ -16488,8 +16457,8 @@ Previously, pt-query-digest only aggregated these attributes: Query_time,Lock_time,Rows_sent,Rows_examined,user,db:Schema,ts -Attributes specified in the L<"--review-history"> table will always be selected -even if you do not specify L<"--select">. +Attributes specified in the review history table (see L<"--review"> will +always be selected even if you do not specify L<"--select">. 
See also L<"--ignore-attributes"> and L<"ATTRIBUTES">. @@ -16550,11 +16519,7 @@ several types: CURRENT_DATE - INTERVAL 7 DAY If you give a MySQL time expression, then you must also specify a DSN -so that pt-query-digest can connect to MySQL to evaluate the expression. If you -specify L<"--explain">, L<"--processlist">, L<"--review"> -or L<"--review-history">, then one of these DSNs will be used automatically. -Otherwise, you must specify an L<"--aux-dsn"> or pt-query-digest will die -saying that the value is invalid. +so that pt-query-digest can connect to MySQL to evaluate the expression. The MySQL time expression is wrapped inside a query like "SELECT UNIX_TIMESTAMP()", so be sure that the expression is @@ -16631,52 +16596,6 @@ Parse a MySQL general log file. General logs lack a lot of L<"ATTRIBUTES">, notably C. The default L<"--order-by"> for general logs changes to C. -=item http - -Parse HTTP traffic from tcpdump. - -=item pglog - -Parse a log file in PostgreSQL format. The parser will automatically recognize -logs sent to syslog and transparently parse the syslog format, too. The -recommended configuration for logging in your postgresql.conf is as follows. - -The log_destination setting can be set to either syslog or stderr. Syslog has -the added benefit of not interleaving log messages from several sessions -concurrently, which the parser cannot handle, so this might be better than -stderr. CSV-formatted logs are not supported at this time. - -The log_min_duration_statement setting should be set to 0 to capture all -statements with their durations. Alternatively, the parser will also recognize -and handle various combinations of log_duration and log_statement. - -You may enable log_connections and log_disconnections, but this is optional. - -It is highly recommended to set your log_line_prefix to the following: - - log_line_prefix = '%m c=%c,u=%u,D=%d ' - -This lets the parser find timestamps with milliseconds, session IDs, users, and -databases from the log. If these items are missing, you'll simply get less -information to analyze. For compatibility with other log analysis tools such as -PQA and pgfouine, various log line prefix formats are supported. The general -format is as follows: a timestamp can be detected and extracted (the syslog -timestamp is NOT parsed), and a name=value list of properties can also. -Although the suggested format is as shown above, any name=value list will be -captured and interpreted by using the first letter of the 'name' part, -lowercased, to determine the meaning of the item. The lowercased first letter -is interpreted to mean the same thing as PostgreSQL's built-in %-codes for the -log_line_prefix format string. For example, u means user, so unicorn=fred -will be interpreted as user=fred; d means database, so D=john will be -interpreted as database=john. The pgfouine-suggested formatting is user=%u and -db=%d, so it should Just Work regardless of which format you choose. The main -thing is to add as much information as possible into the log_line_prefix to -permit richer analysis. - -Currently, only English locale messages are supported, so if your server's -locale is set to something else, the log won't be parsed properly. (Log -messages with "duration:" and "statement:" won't be recognized.) - =item slowlog Parse a log file in any variation of MySQL slow-log format. @@ -16742,17 +16661,6 @@ database. Server-side prepared statements are supported. SSL-encrypted traffic cannot be inspected and decoded. 
-=item memcached - -Similar to tcpdump, but the expected input is memcached packets -instead of MySQL packets. For example: - - tcpdump -i any port 11211 -s 65535 -x -nn -q -tttt \ - > memcached.tcp.txt - pt-query-digest --type memcached memcached.tcp.txt - -memcached uses port 11211 by default. - =back =item --until From 16bb2d3c0e7d6ba2b4af23c0f7deb434171aabfc Mon Sep 17 00:00:00 2001 From: Daniel Nichter Date: Thu, 17 Jan 2013 09:17:48 -0700 Subject: [PATCH 18/34] Restore r534 of pqd so it works for testing. --- bin/pt-query-digest | 764 +++++++++++++++++++++++++------------------- 1 file changed, 428 insertions(+), 336 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index ae7b93ba..cd735aa4 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -15167,29 +15167,37 @@ if ( !caller ) { exit main(@ARGV); } =head1 NAME -pt-query-digest - Analyze and report on MySQL queries. +pt-query-digest - Analyze query execution logs and generate a query report, filter, replay, or transform queries for MySQL, PostgreSQL, memcached, and more. =head1 SYNOPSIS -Usage: pt-query-digest [OPTIONS] [DSN] [FILES] +Usage: pt-query-digest [OPTION...] [FILE] -pt-query-digest analyzes MySQL queries from logs, C, and -L. By default, queries are grouped by fingerprint and reported -in descending order of total execution time. Specifying L<"--review"> and -a C saves query data in a table for later analysis and historical -trending. The tool reads C if no C are given. +pt-query-digest parses and analyzes MySQL log files. With no FILE, or when +FILE is -, it read standard input. -Analyze and report on queries in C: +Analyze, aggregate, and report on a slow query log: - pt-query-digest slow.log + pt-query-digest /path/to/slow.log -Save query data in MySQL on host1: +Review a slow log, saving results to the test.query_review table in a MySQL +server running on host1. See L<"--review"> for more on reviewing queries: - pt-query-digest slow.log h=host1 --review + pt-query-digest --review h=host1,D=test,t=query_review /path/to/slow.log -Get queries from C on host1: +Print the structure of events so you can construct a complex L<"--filter">: - pt-query-digest --processlist h=host1 + pt-query-digest /path/to/slow.log --no-report \ + --filter 'print Dumper($event)' + +Watch SHOW FULL PROCESSLIST and output a log in slow query log format: + + pt-query-digest --processlist h=host1 --print --no-report + +The default aggregation and analysis is CPU and memory intensive. Disable it if +you don't need the default report: + + pt-query-digest --no-report =head1 RISKS @@ -15492,6 +15500,10 @@ example, You can see how useful this meta-data is -- as you analyze your queries, you get your comments integrated right into the report. +If you add the L<"--review-history"> option, it will also store information into +a separate database table, so you can keep historical trending information on +classes of queries. + =back =head1 FINGERPRINTS @@ -15594,6 +15606,9 @@ Collapse multiple identical UNION queries into a single one. =head1 OPTIONS +DSN values in L<"--review-history"> default to values in L<"--review"> if COPY +is yes. + This tool accepts additional command-line arguments. Refer to the L<"SYNOPSIS"> and usage information for details. @@ -15629,6 +15644,19 @@ This option deals with bugs in slow-logging functionality that causes large values for attributes. If the attribute's value is bigger than this, the last-seen value for that class of query is used instead. 
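As a minimal sketch of the behavior described above (the subroutine name and
the limit value are assumptions for illustration, not pt-query-digest
internals): an attribute value above the limit is discarded in favor of the
last value seen for that query class, while values within the limit update the
last-seen record.

  # Hypothetical illustration of --attribute-value-limit handling.
  use strict;
  use warnings;

  my $limit = 4294967296;   # assumed limit value for this sketch

  sub clamp_attribute_value {
     my ( $class_state, $attrib, $value ) = @_;
     if ( defined $value && $value > $limit ) {
        # Suspiciously large: fall back to the last-seen value for this class.
        return $class_state->{last_seen}->{$attrib};
     }
     $class_state->{last_seen}->{$attrib} = $value;
     return $value;
  }

  my %state;
  print clamp_attribute_value(\%state, 'Rows_examined', 42), "\n";   # 42
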
+=item --aux-dsn + +type: DSN + +Auxiliary DSN used for special options. + +The following options may require a DSN even when only parsing a slow log file: + + * --since + * --until + +See each option for why it might require a DSN. + =item --charset short form: -A; type: string @@ -15667,20 +15695,16 @@ default: yes Continue parsing even if there is an error. -=item --[no]create-history-table +=item --create-review-history-table -default: yes +Create the L<"--review-history"> table if it does not exist. -Create the L<"--review"> tables if they do not exist. +This option causes the table specified by L<"--review-history"> to be created +with the default structure shown in the documentation for that option. -This option causes the table specified by L<"--review"> to be created with the -default structure shown in the documentation for that option. +=item --create-review-table -=item --[no]create-review-table - -default: yes - -Create the L<"--review"> tables if they do not exist. +Create the L<"--review"> table if it does not exist. This option causes the table specified by L<"--review"> to be created with the default structure shown in the documentation for that option. @@ -15901,11 +15925,324 @@ L<"ATTRIBUTES">). Show help and exit. -=item --history +=item --host -type: string; default: percona_schema.query_history +short form: -h; type: string -The table in which to store query data for historical trend analysis. +Connect to host. + +=item --ignore-attributes + +type: array; default: arg, cmd, insert_id, ip, port, Thread_id, timestamp, exptime, flags, key, res, val, server_id, offset, end_log_pos, Xid + +Do not aggregate these attributes when auto-detecting L<"--select">. + +If you do not specify L<"--select"> then pt-query-digest auto-detects and +aggregates every attribute that it finds in the slow log. Some attributes, +however, should not be aggregated. This option allows you to specify a list +of attributes to ignore. This only works when no explicit L<"--select"> is +given. + +=item --inherit-attributes + +type: array; default: db,ts + +If missing, inherit these attributes from the last event that had them. + +This option sets which attributes are inherited or carried forward to events +which do not have them. For example, if one event has the db attribute equal +to "foo", but the next event doesn't have the db attribute, then it inherits +"foo" for its db attribute. + +=item --interval + +type: float; default: .1 + +How frequently to poll the processlist, in seconds. + +=item --iterations + +type: int; default: 1 + +How many times to iterate through the collect-and-report cycle. If 0, iterate +to infinity. Each iteration runs for L<"--run-time"> amount of time. An +iteration is usually determined by an amount of time and a report is printed +when that amount of time elapses. With L<"--run-time-mode"> C, +an interval is instead determined by the interval time you specify with +L<"--run-time">. See L<"--run-time"> and L<"--run-time-mode"> for more +information. + +=item --limit + +type: Array; default: 95%:20 + +Limit output to the given percentage or count. + +If the argument is an integer, report only the top N worst queries. If the +argument is an integer followed by the C<%> sign, report that percentage of the +worst queries. If the percentage is followed by a colon and another integer, +report the top percentage or the number specified by that integer, whichever +comes first. + +The value is actually a comma-separated array of values, one for each item in +L<"--group-by">. 
If you don't specify a value for any of those items, the +default is the top 95%. + +See also L<"--outliers">. + +=item --log + +type: string + +Print all output to this file when daemonized. + +=item --order-by + +type: Array; default: Query_time:sum + +Sort events by this attribute and aggregate function. + +This is a comma-separated list of order-by expressions, one for each +L<"--group-by"> attribute. The default C is used for +L<"--group-by"> attributes without explicitly given L<"--order-by"> attributes +(that is, if you specify more L<"--group-by"> attributes than corresponding +L<"--order-by"> attributes). The syntax is C. See +L<"ATTRIBUTES"> for valid attributes. Valid aggregates are: + + Aggregate Meaning + ========= ============================ + sum Sum/total attribute value + min Minimum attribute value + max Maximum attribute value + cnt Frequency/count of the query + +For example, the default C means that queries in the +query analysis report will be ordered (sorted) by their total query execution +time ("Exec time"). C orders the queries by their +maximum query execution time, so the query with the single largest +C will be list first. C refers more to the frequency +of the query as a whole, how often it appears; "Count" is its corresponding +line in the query analysis report. So any attribute and C should yield +the same report wherein queries are sorted by the number of times they +appear. + +When parsing general logs (L<"--type"> C), the default L<"--order-by"> +becomes C. General logs do not report query times so only +the C aggregate makes sense because all query times are zero. + +If you specify an attribute that doesn't exist in the events, then +pt-query-digest falls back to the default C and prints a notice +at the beginning of the report for each query class. You can create attributes +with L<"--filter"> and order by them; see L<"ATTRIBUTES"> for an example. + +=item --outliers + +type: array; default: Query_time:1:10 + +Report outliers by attribute:percentile:count. + +The syntax of this option is a comma-separated list of colon-delimited strings. +The first field is the attribute by which an outlier is defined. The second is +a number that is compared to the attribute's 95th percentile. The third is +optional, and is compared to the attribute's cnt aggregate. Queries that pass +this specification are added to the report, regardless of any limits you +specified in L<"--limit">. + +For example, to report queries whose 95th percentile Query_time is at least 60 +seconds and which are seen at least 5 times, use the following argument: + + --outliers Query_time:60:5 + +You can specify an --outliers option for each value in L<"--group-by">. + + +=item --output + +type: string; default: query + +Type of report to use. Accepted values are C<"query"> and C<"json">. + +=item --password + +short form: -p; type: string + +Password to use when connecting. + +=item --pid + +type: string + +Create the given PID file when daemonized. The file contains the process +ID of the daemonized instance. The PID file is removed when the +daemonized instance exits. The program checks for the existence of the +PID file when starting; if it exists and the process with the matching PID +exists, the program exits. + +=item --port + +short form: -P; type: int + +Port number to use for connection. + +=item --print + +Print log events to STDOUT in standard slow-query-log format. + +=item --print-iterations + +Print the start time for each L<"--iterations">. 
+ +This option causes a line like the following to be printed at the start +of each L<"--iterations"> report: + + # Iteration 2 started at 2009-11-24T14:39:48.345780 + +This line will print even if C<--no-report> is specified. If C<--iterations 0> +is specified, each iteration number will be C<0>. + +=item --processlist + +type: DSN + +Poll this DSN's processlist for queries, with L<"--interval"> sleep between. + +If the connection fails, pt-query-digest tries to reopen it once per second. + +=item --progress + +type: array; default: time,30 + +Print progress reports to STDERR. The value is a comma-separated list with two +parts. The first part can be percentage, time, or iterations; the second part +specifies how often an update should be printed, in percentage, seconds, or +number of iterations. + +=item --read-timeout + +type: time; default: 0 + +Wait this long for an event from the input; 0 to wait forever. + +This option sets the maximum time to wait for an event from the input. It +applies to all types of input except L<"--processlist">. If an +event is not received after the specified time, the script stops reading the +input and prints its reports. If L<"--iterations"> is 0 or greater than +1, the next iteration will begin, else the script will exit. + +This option requires the Perl POSIX module. + +=item --[no]report + +default: yes + +Print out reports on the aggregate results from L<"--group-by">. + +This is the standard slow-log analysis functionality. See L<"OUTPUT"> for the +description of what this does and what the results look like. + +=item --report-all + +Include all queries, even if they have already been reviewed. + +=item --report-format + +type: Array; default: rusage,date,hostname,files,header,profile,query_report,prepared + +Print these sections of the query analysis report. + + SECTION PRINTS + ============ ====================================================== + rusage CPU times and memory usage reported by ps + date Current local date and time + hostname Hostname of machine on which pt-query-digest was run + files Input files read/parse + header Summary of the entire analysis run + profile Compact table of queries for an overview of the report + query_report Detailed information about each unique query + prepared Prepared statements + +The sections are printed in the order specified. The rusage, date, files and +header sections are grouped together if specified together; other sections are +separated by blank lines. + +See L<"OUTPUT"> for more information on the various parts of the query report. + +=item --report-histogram + +type: string; default: Query_time + +Chart the distribution of this attribute's values. + +The distribution chart is limited to time-based attributes, so charting +C, for example, will produce a useless chart. Charts look +like: + + # Query_time distribution + # 1us + # 10us + # 100us + # 1ms + # 10ms ################################ + # 100ms ################################################################ + # 1s ######## + # 10s+ + +See L<"OUTPUT"> for more information. + +=item --resume + +If enabled, the tool will save the furthest it got into the log before exiting; +Future runs on that log with --resume enabled will start from that position. + +=item --review + +type: DSN + +Store a sample of each class of query in this DSN. + +The argument specifies a table to store all unique query fingerprints in. The +table must have at least the following columns. 
You can add more columns for +your own special purposes, but they won't be used by pt-query-digest. The +following CREATE TABLE definition is also used for L<"--create-review-table">. +MAGIC_create_review: + + CREATE TABLE query_review ( + checksum BIGINT UNSIGNED NOT NULL PRIMARY KEY, + fingerprint TEXT NOT NULL, + sample TEXT NOT NULL, + first_seen DATETIME, + last_seen DATETIME, + reviewed_by VARCHAR(20), + reviewed_on DATETIME, + comments TEXT + ) + +The columns are as follows: + + COLUMN MEANING + =========== =============== + checksum A 64-bit checksum of the query fingerprint + fingerprint The abstracted version of the query; its primary key + sample The query text of a sample of the class of queries + first_seen The smallest timestamp of this class of queries + last_seen The largest timestamp of this class of queries + reviewed_by Initially NULL; if set, query is skipped thereafter + reviewed_on Initially NULL; not assigned any special meaning + comments Initially NULL; not assigned any special meaning + +Note that the C column is the true primary key for a class of +queries. The C is just a cryptographic hash of this value, which +provides a shorter value that is very likely to also be unique. + +After parsing and aggregating events, your table should contain a row for each +fingerprint. This option depends on C<--group-by fingerprint> (which is the +default). It will not work otherwise. + +=item --review-history + +type: DSN + +The table in which to store historical values for review trend analysis. Each time you review queries with L<"--review">, pt-query-digest will save information into this table so you can see how classes of queries have changed @@ -15943,7 +16280,7 @@ you could also just add a ts_min column and make it a DATE type, so you'd get one row per class of queries per day. The default table structure follows. The following MAGIC_create_review_history -table definition is used for L<"--create-review-tables">: +table definition is used for L<"--create-review-history-table">: CREATE TABLE query_review_history ( checksum BIGINT UNSIGNED NOT NULL, @@ -16049,312 +16386,6 @@ table definition is used for L<"--create-review-tables">: Note that we store the count (cnt) for the ts attribute only; it will be redundant to store this for other attributes. -=item --host - -short form: -h; type: string - -Connect to host. - -=item --ignore-attributes - -type: array; default: arg, cmd, insert_id, ip, port, Thread_id, timestamp, exptime, flags, key, res, val, server_id, offset, end_log_pos, Xid - -Do not aggregate these attributes when auto-detecting L<"--select">. - -If you do not specify L<"--select"> then pt-query-digest auto-detects and -aggregates every attribute that it finds in the slow log. Some attributes, -however, should not be aggregated. This option allows you to specify a list -of attributes to ignore. This only works when no explicit L<"--select"> is -given. - -=item --inherit-attributes - -type: array; default: db,ts - -If missing, inherit these attributes from the last event that had them. - -This option sets which attributes are inherited or carried forward to events -which do not have them. For example, if one event has the db attribute equal -to "foo", but the next event doesn't have the db attribute, then it inherits -"foo" for its db attribute. - -=item --interval - -type: float; default: .1 - -How frequently to poll the L<"--processlist">, in seconds. 
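As an illustration only of the --processlist / --interval combination
described above (connection details are placeholders and this is not the
tool's code), the polling loop boils down to something like:

  # Hypothetical polling sketch: read SHOW FULL PROCESSLIST every
  # --interval seconds and print the running statements.
  use strict;
  use warnings;
  use DBI;
  use Time::HiRes qw(sleep);

  my $interval = 0.1;   # the documented --interval default
  my $dbh = DBI->connect('DBI:mysql:host=host1', 'user', 'password',
     { RaiseError => 1 });

  while ( 1 ) {
     my $procs = $dbh->selectall_arrayref(
        'SHOW FULL PROCESSLIST', { Slice => {} });
     foreach my $proc ( @$procs ) {
        next unless defined $proc->{Info};   # skip idle connections
        printf "%4ds  %s\n", $proc->{Time} || 0, $proc->{Info};
     }
     sleep $interval;
  }
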
- -=item --iterations - -type: int; default: 1 - -How many times to iterate through the collect-and-report cycle. If 0, iterate -to infinity. Each iteration runs for L<"--run-time"> amount of time. An -iteration is usually determined by an amount of time and a report is printed -when that amount of time elapses. With L<"--run-time-mode"> C, -an interval is instead determined by the interval time you specify with -L<"--run-time">. See L<"--run-time"> and L<"--run-time-mode"> for more -information. - -=item --limit - -type: Array; default: 95%:20 - -Limit output to the given percentage or count. - -If the argument is an integer, report only the top N worst queries. If the -argument is an integer followed by the C<%> sign, report that percentage of the -worst queries. If the percentage is followed by a colon and another integer, -report the top percentage or the number specified by that integer, whichever -comes first. - -The value is actually a comma-separated array of values, one for each item in -L<"--group-by">. If you don't specify a value for any of those items, the -default is the top 95%. - -See also L<"--outliers">. - -=item --log - -type: string - -Print all output to this file when daemonized. - -=item --order-by - -type: Array; default: Query_time:sum - -Sort events by this attribute and aggregate function. - -This is a comma-separated list of order-by expressions, one for each -L<"--group-by"> attribute. The default C is used for -L<"--group-by"> attributes without explicitly given L<"--order-by"> attributes -(that is, if you specify more L<"--group-by"> attributes than corresponding -L<"--order-by"> attributes). The syntax is C. See -L<"ATTRIBUTES"> for valid attributes. Valid aggregates are: - - Aggregate Meaning - ========= ============================ - sum Sum/total attribute value - min Minimum attribute value - max Maximum attribute value - cnt Frequency/count of the query - -For example, the default C means that queries in the -query analysis report will be ordered (sorted) by their total query execution -time ("Exec time"). C orders the queries by their -maximum query execution time, so the query with the single largest -C will be list first. C refers more to the frequency -of the query as a whole, how often it appears; "Count" is its corresponding -line in the query analysis report. So any attribute and C should yield -the same report wherein queries are sorted by the number of times they -appear. - -When parsing general logs (L<"--type"> C), the default L<"--order-by"> -becomes C. General logs do not report query times so only -the C aggregate makes sense because all query times are zero. - -If you specify an attribute that doesn't exist in the events, then -pt-query-digest falls back to the default C and prints a notice -at the beginning of the report for each query class. You can create attributes -with L<"--filter"> and order by them; see L<"ATTRIBUTES"> for an example. - -=item --outliers - -type: array; default: Query_time:1:10 - -Report outliers by attribute:percentile:count. - -The syntax of this option is a comma-separated list of colon-delimited strings. -The first field is the attribute by which an outlier is defined. The second is -a number that is compared to the attribute's 95th percentile. The third is -optional, and is compared to the attribute's cnt aggregate. Queries that pass -this specification are added to the report, regardless of any limits you -specified in L<"--limit">. 
- -For example, to report queries whose 95th percentile Query_time is at least 60 -seconds and which are seen at least 5 times, use the following argument: - - --outliers Query_time:60:5 - -You can specify an --outliers option for each value in L<"--group-by">. - -=item --output - -type: string; default: report - -Type of report to print to C. - -Accepted values are C, C, C, and C. - -C is useful with L<"--review"> and L<"--history"> when you only want -to save the query data and not have a printed reported. - -=item --password - -short form: -p; type: string - -Password to use when connecting. - -=item --pid - -type: string - -Create the given PID file when daemonized. The file contains the process -ID of the daemonized instance. The PID file is removed when the -daemonized instance exits. The program checks for the existence of the -PID file when starting; if it exists and the process with the matching PID -exists, the program exits. - -=item --port - -short form: -P; type: int - -Port number to use for connection. - -=item --print-iterations - -Print the start time for each L<"--iterations">. - -This option causes a line like the following to be printed at the start -of each L<"--iterations"> report: - - # Iteration 2 started at 2009-11-24T14:39:48.345780 - -This line will print even if C<--no-report> is specified. If C<--iterations 0> -is specified, each iteration number will be C<0>. - -=item --processlist - -type: DSN - -Poll this DSN's processlist for queries, with L<"--interval"> sleep between. - -If the connection fails, pt-query-digest tries to reopen it once per second. - -The DSN inherits from the DSN given on the command line, if any. - -=item --progress - -type: array; default: time,30 - -Print progress reports to STDERR. The value is a comma-separated list with two -parts. The first part can be percentage, time, or iterations; the second part -specifies how often an update should be printed, in percentage, seconds, or -number of iterations. - -=item --read-timeout - -type: time; default: 0 - -Wait this long for an event from the input; 0 to wait forever. - -This option sets the maximum time to wait for an event from the input. It -applies to all types of input except L<"--processlist">. If an -event is not received after the specified time, the script stops reading the -input and prints its reports. If L<"--iterations"> is 0 or greater than -1, the next iteration will begin, else the script will exit. - -This option requires the Perl POSIX module. - -=item --report-all - -Include all queries, even if they have already been reviewed. - -=item --report-format - -type: Array; default: rusage,date,hostname,files,header,profile,query_report,prepared - -Print these sections of the query analysis report. - - SECTION PRINTS - ============ ====================================================== - rusage CPU times and memory usage reported by ps - date Current local date and time - hostname Hostname of machine on which pt-query-digest was run - files Input files read/parse - header Summary of the entire analysis run - profile Compact table of queries for an overview of the report - query_report Detailed information about each unique query - prepared Prepared statements - -The sections are printed in the order specified. The rusage, date, files and -header sections are grouped together if specified together; other sections are -separated by blank lines. - -See L<"OUTPUT"> for more information on the various parts of the query report. 
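For example, to print only the profile and the detailed per-query report,
skipping the other sections (slow.log is a placeholder file name):

  pt-query-digest --report-format profile,query_report slow.log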
- -=item --report-histogram - -type: string; default: Query_time - -Chart the distribution of this attribute's values. - -The distribution chart is limited to time-based attributes, so charting -C, for example, will produce a useless chart. Charts look -like: - - # Query_time distribution - # 1us - # 10us - # 100us - # 1ms - # 10ms ################################ - # 100ms ################################################################ - # 1s ######## - # 10s+ - -See L<"OUTPUT"> for more information. - -=item --resume - -If enabled, the tool will save the furthest it got into the log before exiting; -Future runs on that log with --resume enabled will start from that position. - -=item --review - -type: string; default: percona_schema.query_review - -Store a sample of each class of query in this DSN. - -The argument specifies a table to store all unique query fingerprints in. The -table must have at least the following columns. You can add more columns for -your own special purposes, but they won't be used by pt-query-digest. The -following CREATE TABLE definition is also used for L<"--create-review-tables">. -MAGIC_create_review: - - CREATE TABLE query_review ( - checksum BIGINT UNSIGNED NOT NULL PRIMARY KEY, - fingerprint TEXT NOT NULL, - sample TEXT NOT NULL, - first_seen DATETIME, - last_seen DATETIME, - reviewed_by VARCHAR(20), - reviewed_on DATETIME, - comments TEXT - ) - -The columns are as follows: - - COLUMN MEANING - =========== =============== - checksum A 64-bit checksum of the query fingerprint - fingerprint The abstracted version of the query; its primary key - sample The query text of a sample of the class of queries - first_seen The smallest timestamp of this class of queries - last_seen The largest timestamp of this class of queries - reviewed_by Initially NULL; if set, query is skipped thereafter - reviewed_on Initially NULL; not assigned any special meaning - comments Initially NULL; not assigned any special meaning - -Note that the C column is the true primary key for a class of -queries. The C is just a cryptographic hash of this value, which -provides a shorter value that is very likely to also be unique. - -After parsing and aggregating events, your table should contain a row for each -fingerprint. This option depends on C<--group-by fingerprint> (which is the -default). It will not work otherwise. - =item --run-time type: time @@ -16457,8 +16488,8 @@ Previously, pt-query-digest only aggregated these attributes: Query_time,Lock_time,Rows_sent,Rows_examined,user,db:Schema,ts -Attributes specified in the review history table (see L<"--review"> will -always be selected even if you do not specify L<"--select">. +Attributes specified in the L<"--review-history"> table will always be selected +even if you do not specify L<"--select">. See also L<"--ignore-attributes"> and L<"ATTRIBUTES">. @@ -16519,7 +16550,11 @@ several types: CURRENT_DATE - INTERVAL 7 DAY If you give a MySQL time expression, then you must also specify a DSN -so that pt-query-digest can connect to MySQL to evaluate the expression. +so that pt-query-digest can connect to MySQL to evaluate the expression. If you +specify L<"--explain">, L<"--processlist">, L<"--review"> +or L<"--review-history">, then one of these DSNs will be used automatically. +Otherwise, you must specify an L<"--aux-dsn"> or pt-query-digest will die +saying that the value is invalid. 
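For example, one illustrative way to combine a MySQL time expression with an
auxiliary DSN (the host and log file names are placeholders):

  pt-query-digest --since 'CURRENT_DATE - INTERVAL 7 DAY' \
     --aux-dsn h=localhost slow.log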
The MySQL time expression is wrapped inside a query like "SELECT UNIX_TIMESTAMP()", so be sure that the expression is @@ -16596,6 +16631,52 @@ Parse a MySQL general log file. General logs lack a lot of L<"ATTRIBUTES">, notably C. The default L<"--order-by"> for general logs changes to C. +=item http + +Parse HTTP traffic from tcpdump. + +=item pglog + +Parse a log file in PostgreSQL format. The parser will automatically recognize +logs sent to syslog and transparently parse the syslog format, too. The +recommended configuration for logging in your postgresql.conf is as follows. + +The log_destination setting can be set to either syslog or stderr. Syslog has +the added benefit of not interleaving log messages from several sessions +concurrently, which the parser cannot handle, so this might be better than +stderr. CSV-formatted logs are not supported at this time. + +The log_min_duration_statement setting should be set to 0 to capture all +statements with their durations. Alternatively, the parser will also recognize +and handle various combinations of log_duration and log_statement. + +You may enable log_connections and log_disconnections, but this is optional. + +It is highly recommended to set your log_line_prefix to the following: + + log_line_prefix = '%m c=%c,u=%u,D=%d ' + +This lets the parser find timestamps with milliseconds, session IDs, users, and +databases from the log. If these items are missing, you'll simply get less +information to analyze. For compatibility with other log analysis tools such as +PQA and pgfouine, various log line prefix formats are supported. The general +format is as follows: a timestamp can be detected and extracted (the syslog +timestamp is NOT parsed), and a name=value list of properties can also. +Although the suggested format is as shown above, any name=value list will be +captured and interpreted by using the first letter of the 'name' part, +lowercased, to determine the meaning of the item. The lowercased first letter +is interpreted to mean the same thing as PostgreSQL's built-in %-codes for the +log_line_prefix format string. For example, u means user, so unicorn=fred +will be interpreted as user=fred; d means database, so D=john will be +interpreted as database=john. The pgfouine-suggested formatting is user=%u and +db=%d, so it should Just Work regardless of which format you choose. The main +thing is to add as much information as possible into the log_line_prefix to +permit richer analysis. + +Currently, only English locale messages are supported, so if your server's +locale is set to something else, the log won't be parsed properly. (Log +messages with "duration:" and "statement:" won't be recognized.) + =item slowlog Parse a log file in any variation of MySQL slow-log format. @@ -16661,6 +16742,17 @@ database. Server-side prepared statements are supported. SSL-encrypted traffic cannot be inspected and decoded. +=item memcached + +Similar to tcpdump, but the expected input is memcached packets +instead of MySQL packets. For example: + + tcpdump -i any port 11211 -s 65535 -x -nn -q -tttt \ + > memcached.tcp.txt + pt-query-digest --type memcached memcached.tcp.txt + +memcached uses port 11211 by default. 
+ =back =item --until From 3d93f42bc7c7c8aed3b0cf8bb8286db346e9461d Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Thu, 17 Jan 2013 15:10:53 -0300 Subject: [PATCH 19/34] Add missing if to the resume code --- bin/pt-query-digest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index cd735aa4..01bf5e1f 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -13841,7 +13841,9 @@ sub main { $args->{input_fh} = $fh; $args->{tell} = sub { my $pos = tell $fh; - $args->{pos_for}->{$args->{filename}} = $pos; + if ( $args->{filename} ) { + $args->{pos_for}->{$args->{filename}} = $pos; + } return $pos; }; $args->{more_events} = 1; From 41f440518f61d2b8e89c0621b862fe79c3b309ca Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Thu, 17 Jan 2013 19:40:58 -0300 Subject: [PATCH 20/34] Fixed the precisions for numbers in the json output --- bin/pt-query-digest | 6 ++++-- lib/JSONReportFormatter.pm | 9 +++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 01bf5e1f..4e779928 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -8049,11 +8049,13 @@ override query_report => sub { elsif ( ($ea->{type_for}->{$attrib} || '') eq 'num' ) { for my $value ( values %{$metrics{$attrib}} ) { next unless $value; - $value = sprintf '%.7f', $value; + $value = sprintf '%.6f', $value; + } + if ( my $pct = $metrics{$attrib}->{pct} ) { + $metrics{$attrib}->{pct} = sprintf('%.2f', $pct); } } } - push @queries, { class => \%class, attributes => \%metrics, diff --git a/lib/JSONReportFormatter.pm b/lib/JSONReportFormatter.pm index e2cc0a31..331e08de 100644 --- a/lib/JSONReportFormatter.pm +++ b/lib/JSONReportFormatter.pm @@ -71,13 +71,18 @@ override query_report => sub { $class{ts_max} = $ts->{max}; } elsif ( ($ea->{type_for}->{$attrib} || '') eq 'num' ) { + # Avoid scientific notation in the metrics by forcing it to use + # six decimal places. 
for my $value ( values %{$metrics{$attrib}} ) { next unless $value; - $value = sprintf '%.7f', $value; + $value = sprintf '%.6f', $value; + } + # ..except for the percentage, which only needs two + if ( my $pct = $metrics{$attrib}->{pct} ) { + $metrics{$attrib}->{pct} = sprintf('%.2f', $pct); } } } - push @queries, { class => \%class, attributes => \%metrics, From 86f9e302f5a5a6aca549d2f544922bc427c250dd Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Thu, 17 Jan 2013 19:44:18 -0300 Subject: [PATCH 21/34] Updated samples for t/pqd/output.t --- t/pt-query-digest/samples/output_json_slow002.txt | 2 +- t/pt-query-digest/samples/output_json_tcpdump021.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/t/pt-query-digest/samples/output_json_slow002.txt b/t/pt-query-digest/samples/output_json_slow002.txt index adeae63b..c2798ea4 100644 --- a/t/pt-query-digest/samples/output_json_slow002.txt +++ b/t/pt-query-digest/samples/output_json_slow002.txt @@ -1,2 +1,2 @@ -[{"event_report":{"time_range":"all events occurred at 2007-12-18 11:48:27","variance_to_mean":0,"reason":"top","qps":0,"counts":{"class_cnt":1,"global_cnt":8},"concurrency":0,"checksum":"66825DDC008FFA89","pos_in_log":338,"attributes":{"bool":[["Full_scan","100","0"]],"innodb":[],"num":[["Query_time","95","726ms","726ms","726ms","726ms","726ms",0,"726ms"],["Lock_time","29","91us","91us","91us","91us","91us",0,"91us"],["Rows_sent","0","0","0","0","0","0","0","0"],["Rows_examined","100","61.48k","61.48k","61.48k","61.48k","61.48k","0","61.48k"],["Merge_passes","0","0","0","0","0","0","0","0"],["bytes","25","129","129","129","129","129","0","129"]],"string":[["db",{"min":"db1","max":"db1","unq":{"db1":1},"cnt":1}],["host",{"min":"","max":"","unq":{"":1},"cnt":1}],["user",{"min":"[SQL_SLAVE]","max":"[SQL_SLAVE]","unq":{"[SQL_SLAVE]":1},"cnt":1}]]},"groupby":"fingerprint"},"reason":"top","item":"update d?tuningdetail_?_? n inner join d?gonzo a using(gonzo) set n.column? = a.column?, n.word? = a.word?","samp_query":"update db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) \n set n.column1 = a.column1, n.word3 = a.word3","for_explain":"EXPLAIN /*!50100 PARTITIONS*/\nselect n.column1 = a.column1, n.word3 = a.word3 from db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) \\G\n","tables":[["db2","tuningdetail_21_265507"],["db1","gonzo"]],"rank":1,"default_db":"db1"}] +[{"class":{"checksum":"66825DDC008FFA89","ts_min":"2007-12-18 11:48:27","ts_max":"2007-12-18 11:48:27","fingerprint":"update d?tuningdetail_?_? n inner join d?gonzo a using(gonzo) set n.column? = a.column?, n.word? 
= a.word?","sample":"update db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) \n set n.column1 = a.column1, n.word3 = a.word3","cnt":1},"attributes":{"bytes":{"pct":"0.12","avg":"129.000000","min":"129.000000","max":"129.000000","median":"129.000000","cnt":"1.000000","stddev":0,"pct_95":"129.000000","sum":"129.000000"},"db":{"pct":0.142857142857143,"avg":0,"min":"db1","max":"db1","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Full_join":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"Merge_passes":{"pct":"0.12","avg":0,"min":"0","max":"0","median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Filesort":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"user":{"pct":0.125,"avg":0,"min":"[SQL_SLAVE]","max":"[SQL_SLAVE]","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Rows_sent":{"pct":"0.12","avg":0,"min":"0","max":"0","median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Lock_time":{"pct":"0.12","avg":"0.000091","min":"0.000091","max":"0.000091","median":"0.000091","cnt":"1.000000","stddev":0,"pct_95":"0.000091","sum":"0.000091"},"Full_scan":{"pct":0.125,"avg":1,"min":1,"max":1,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":1},"Filesort_on_disk":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"host":{"pct":0.125,"avg":0,"min":"","max":"","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"pos_in_log":{"pct":"0.12","avg":"338.000000","min":"338.000000","max":"338.000000","median":"338.000000","cnt":"1.000000","stddev":0,"pct_95":"338.000000","sum":"338.000000"},"Tmp_table":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"QC_Hit":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"Tmp_table_on_disk":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"Rows_examined":{"pct":"0.12","avg":"62951.000000","min":"62951.000000","max":"62951.000000","median":"62951.000000","cnt":"1.000000","stddev":0,"pct_95":"62951.000000","sum":"62951.000000"},"Query_time":{"pct":"0.12","avg":"0.726052","min":"0.726052","max":"0.726052","median":"0.726052","cnt":"1.000000","stddev":0,"pct_95":"0.726052","sum":"0.726052"}}}] diff --git a/t/pt-query-digest/samples/output_json_tcpdump021.txt b/t/pt-query-digest/samples/output_json_tcpdump021.txt index dad915c0..d9252715 100644 --- a/t/pt-query-digest/samples/output_json_tcpdump021.txt +++ b/t/pt-query-digest/samples/output_json_tcpdump021.txt @@ -1,2 +1,2 @@ -[{"event_report":{"time_range":"all events occurred at 2009-12-08 09:23:49.637394","variance_to_mean":0,"reason":"top","qps":0,"counts":{"class_cnt":1,"global_cnt":3},"concurrency":0,"checksum":"AA8E9FA785927259","pos_in_log":0,"attributes":{"bool":[],"innodb":[],"num":[["Query_time","50","286us","286us","286us","286us","286us",0,"286us"],["Rows_affected","0","0","0","0","0","0","0","0"],["bytes","35","35","35","35","35","35","0","35"],["Warning_count","0","0","0","0","0","0","0","0"]],"string":[["Error_no",{"min":"none","max":"none","unq":{"none":1},"cnt":1}],["host",{"min":"127.0.0.1","max":"127.0.0.1","unq":{"127.0.0.1":1},"cnt":1}],["Statement_id",{"min":2,"max":2,"unq":{"2":1},"cnt":1}]]},"groupby":"fingerprint"},"reason":"top","item":"prepare select i from d.t where i=?","samp_query":"PREPARE SELECT i FROM d.t WHERE i=?","for_explain":"EXPLAIN /*!50100 PARTITIONS*/\nSELECT i FROM d.t WHERE 
i=?\\G\n","tables":[["d","t"]],"rank":1,"default_db":null},{"event_report":{"time_range":"all events occurred at 2009-12-08 09:23:49.637892","variance_to_mean":0,"reason":"top","qps":0,"counts":{"class_cnt":1,"global_cnt":3},"concurrency":0,"checksum":"3F79759E7FA2F117","pos_in_log":1106,"attributes":{"bool":[["No_index_used","100","0"]],"innodb":[],"num":[["Query_time","49","281us","281us","281us","281us","281us",0,"281us"],["Rows_affected","0","0","0","0","0","0","0","0"],["bytes","37","37","37","37","37","37","0","37"],["Warning_count","0","0","0","0","0","0","0","0"]],"string":[["Error_no",{"min":"none","max":"none","unq":{"none":1},"cnt":1}],["host",{"min":"127.0.0.1","max":"127.0.0.1","unq":{"127.0.0.1":1},"cnt":1}],["Statement_id",{"min":"2","max":"2","unq":{"2":1},"cnt":1}]]},"groupby":"fingerprint"},"reason":"top","item":"execute select i from d.t where i=?","samp_query":"EXECUTE SELECT i FROM d.t WHERE i=\"3\"","for_explain":"EXPLAIN /*!50100 PARTITIONS*/\nSELECT i FROM d.t WHERE i=\"3\"\\G\n","tables":[["d","t"]],"rank":2,"default_db":null},{"samp_query":"administrator command: Quit","tables":[],"event_report":{"time_range":"all events occurred at 2009-12-08 09:23:49.638381","variance_to_mean":0,"reason":"top","qps":0,"counts":{"class_cnt":1,"global_cnt":3},"concurrency":0,"checksum":"AA353644DE4C4CB4","pos_in_log":1850,"attributes":{"bool":[],"innodb":[],"num":[["Query_time","0",0,0,0,0,0,0,0],["Rows_affected","0","0","0","0","0","0","0","0"],["bytes","27","27","27","27","27","27","0","27"],["Warning_count","0","0","0","0","0","0","0","0"]],"string":[["Error_no",{"min":"none","max":"none","unq":{"none":1},"cnt":1}],["host",{"min":"127.0.0.1","max":"127.0.0.1","unq":{"127.0.0.1":1},"cnt":1}]]},"groupby":"fingerprint"},"reason":"top","item":"administrator command: Quit","rank":3,"default_db":null}] +[{"class":{"checksum":"AA8E9FA785927259","ts_min":"2009-12-08 09:23:49.637394","ts_max":"2009-12-08 09:23:49.637394","fingerprint":"prepare select i from d.t where i=?","sample":"PREPARE SELECT i FROM d.t WHERE i=?","cnt":1},"attributes":{"bytes":{"pct":"0.33","avg":"35.000000","min":"35.000000","max":"35.000000","median":"35.000000","cnt":"1.000000","stddev":0,"pct_95":"35.000000","sum":"35.000000"},"No_good_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"No_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"host":{"pct":0.333333333333333,"avg":0,"min":"127.0.0.1","max":"127.0.0.1","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"pos_in_log":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Rows_affected":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Statement_id":{"pct":0.5,"avg":0,"min":2,"max":2,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Error_no":{"pct":0.333333333333333,"avg":0,"min":"none","max":"none","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Query_time":{"pct":"0.33","avg":"0.000286","min":"0.000286","max":"0.000286","median":"0.000286","cnt":"1.000000","stddev":0,"pct_95":"0.000286","sum":"0.000286"},"Warning_count":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0}}},{"class":{"checksum":"3F79759E7FA2F117","ts_min":"2009-12-08 09:23:49.637892","ts_max":"2009-12-08 09:23:49.637892","fingerprint":"execute select i from d.t where i=?","sample":"EXECUTE SELECT i FROM d.t WHERE 
i=\"3\"","cnt":1},"attributes":{"bytes":{"pct":"0.33","avg":"37.000000","min":"37.000000","max":"37.000000","median":"37.000000","cnt":"1.000000","stddev":0,"pct_95":"37.000000","sum":"37.000000"},"No_good_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"No_index_used":{"pct":0.333333333333333,"avg":1,"min":1,"max":1,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":1},"host":{"pct":0.333333333333333,"avg":0,"min":"127.0.0.1","max":"127.0.0.1","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"pos_in_log":{"pct":"0.33","avg":"1106.000000","min":"1106.000000","max":"1106.000000","median":"1106.000000","cnt":"1.000000","stddev":0,"pct_95":"1106.000000","sum":"1106.000000"},"Rows_affected":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Statement_id":{"pct":0.5,"avg":0,"min":"2","max":"2","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Error_no":{"pct":0.333333333333333,"avg":0,"min":"none","max":"none","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Query_time":{"pct":"0.33","avg":"0.000281","min":"0.000281","max":"0.000281","median":"0.000281","cnt":"1.000000","stddev":0,"pct_95":"0.000281","sum":"0.000281"},"Warning_count":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0}}},{"class":{"checksum":"AA353644DE4C4CB4","ts_min":"2009-12-08 09:23:49.638381","ts_max":"2009-12-08 09:23:49.638381","fingerprint":"administrator command: Quit","sample":"administrator command: Quit","cnt":1},"attributes":{"bytes":{"pct":"0.33","avg":"27.000000","min":"27.000000","max":"27.000000","median":"27.000000","cnt":"1.000000","stddev":0,"pct_95":"27.000000","sum":"27.000000"},"No_good_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"No_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"host":{"pct":0.333333333333333,"avg":0,"min":"127.0.0.1","max":"127.0.0.1","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"pos_in_log":{"pct":"0.33","avg":"1850.000000","min":"1850.000000","max":"1850.000000","median":"1850.000000","cnt":"1.000000","stddev":0,"pct_95":"1850.000000","sum":"1850.000000"},"Rows_affected":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Error_no":{"pct":0.333333333333333,"avg":0,"min":"none","max":"none","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Query_time":{"pct":"0.33","avg":0,"min":"0.000000","max":"0.000000","median":"0.000000","cnt":"1.000000","stddev":0,"pct_95":"0.000000","sum":0},"Warning_count":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0}}}] From e466408c0d4a2e8239fd1c11a67d1951edaeaa09 Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Fri, 18 Jan 2013 09:47:05 -0300 Subject: [PATCH 22/34] QueryReportFormatter: Resolve a hash ordering issue & fix an error introduced by the refactoring which was breaking two tests: checking if --report-histogram rather than --report-all --- bin/pt-query-digest | 8 ++++---- lib/QueryReportFormatter.pm | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 4e779928..7d4fa6b3 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -7106,9 +7106,9 @@ sub query_report_values { my $review_vals; if ( $qv ) { $review_vals = $qv->get_review_info($item); - next ITEM 
if $review_vals->{reviewed_by} && !$self->{options}->{report_histogram}; + next ITEM if $review_vals->{reviewed_by} && !$self->{options}->{report_all}; for my $col ( $qv->review_cols() ) { - $item_vals{review_vals}{$col} = $review_vals->{$col}; + push @{$item_vals{review_vals}}, [$col, $review_vals->{$col}]; } } @@ -7176,8 +7176,8 @@ sub query_report { if ( $vals->{review_vals} ) { $report .= "# Review information\n"; - foreach my $col ( keys %{$vals->{review_vals}} ) { - my $val = $vals->{review_vals}->{$col}; + foreach my $elem ( @{$vals->{review_vals}} ) { + my ($col, $val) = @$elem; if ( !$val || $val ne '0000-00-00 00:00:00' ) { # issue 202 $report .= sprintf "# %13s: %-s\n", $col, ($val ? $val : ''); } diff --git a/lib/QueryReportFormatter.pm b/lib/QueryReportFormatter.pm index afc6fb82..e679fdaa 100644 --- a/lib/QueryReportFormatter.pm +++ b/lib/QueryReportFormatter.pm @@ -363,9 +363,9 @@ sub query_report_values { my $review_vals; if ( $qv ) { $review_vals = $qv->get_review_info($item); - next ITEM if $review_vals->{reviewed_by} && !$self->{options}->{report_histogram}; + next ITEM if $review_vals->{reviewed_by} && !$self->{options}->{report_all}; for my $col ( $qv->review_cols() ) { - $item_vals{review_vals}{$col} = $review_vals->{$col}; + push @{$item_vals{review_vals}}, [$col, $review_vals->{$col}]; } } @@ -452,8 +452,8 @@ sub query_report { # Print the review information that is already in the table # before putting anything new into the table. $report .= "# Review information\n"; - foreach my $col ( keys %{$vals->{review_vals}} ) { - my $val = $vals->{review_vals}->{$col}; + foreach my $elem ( @{$vals->{review_vals}} ) { + my ($col, $val) = @$elem; if ( !$val || $val ne '0000-00-00 00:00:00' ) { # issue 202 $report .= sprintf "# %13s: %-s\n", $col, ($val ? $val : ''); } From 4a06dc2008cff43810cd724c7afea98116d1b657 Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Fri, 18 Jan 2013 10:20:59 -0300 Subject: [PATCH 23/34] Updated samples to get rid of the extra InnoDB: line removed by the refactoring --- t/pt-query-digest/samples/issue_1196-output.txt | 10 ++++------ .../samples/slow002-orderbynonexistent.txt | 2 -- t/pt-query-digest/samples/slow002_iters_2.txt | 1 - t/pt-query-digest/samples/slow002_orderbyreport.txt | 1 - t/pt-query-digest/samples/slow002_report.txt | 2 -- t/pt-query-digest/samples/slow035.txt | 1 - 6 files changed, 4 insertions(+), 13 deletions(-) diff --git a/t/pt-query-digest/samples/issue_1196-output.txt b/t/pt-query-digest/samples/issue_1196-output.txt index d7b2e2ef..71d380f8 100644 --- a/t/pt-query-digest/samples/issue_1196-output.txt +++ b/t/pt-query-digest/samples/issue_1196-output.txt @@ -1,14 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ======== -# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 1.00 0.00 TF>aa SELECT t +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ======== +# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 0.00 SELECT t # Query 1: 0 QPS, 0x concurrency, ID 0xD4B6A5CD2F2F485C at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: TF>aa -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2010-12-14 16:12:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow002-orderbynonexistent.txt b/t/pt-query-digest/samples/slow002-orderbynonexistent.txt index 1bad0e83..b6cc3a83 100644 --- a/t/pt-query-digest/samples/slow002-orderbynonexistent.txt +++ b/t/pt-query-digest/samples/slow002-orderbynonexistent.txt @@ -12,7 +12,6 @@ # Rows examine 100 61.48k 61.48k 61.48k 61.48k 61.48k 0 61.48k # Merge passes 0 0 0 0 0 0 0 0 # Query size 25 129 129 129 129 129 0 129 -# InnoDB: # Boolean: # Full scan 100% yes, 0% no # String: @@ -254,7 +253,6 @@ VALUES ('', 'Exact')\G # Rows examine 0 0 0 0 0 0 0 0 # Merge passes 0 0 0 0 0 0 0 0 # Query size 0 5 5 5 5 5 0 5 -# InnoDB: # String: # Hosts # Users [SQL_SLAVE] diff --git a/t/pt-query-digest/samples/slow002_iters_2.txt b/t/pt-query-digest/samples/slow002_iters_2.txt index e0ee2280..6bd7c087 100644 --- a/t/pt-query-digest/samples/slow002_iters_2.txt +++ b/t/pt-query-digest/samples/slow002_iters_2.txt @@ -12,7 +12,6 @@ # Rows examine 100 61.48k 61.48k 61.48k 61.48k 61.48k 0 61.48k # Merge passes 0 0 0 0 0 0 0 0 # Query size 25 129 129 129 129 129 0 129 -# InnoDB: # Boolean: # Full scan 100% yes, 0% no # String: diff --git a/t/pt-query-digest/samples/slow002_orderbyreport.txt b/t/pt-query-digest/samples/slow002_orderbyreport.txt index 5eaf9e0b..23cd33f2 100644 --- a/t/pt-query-digest/samples/slow002_orderbyreport.txt +++ b/t/pt-query-digest/samples/slow002_orderbyreport.txt @@ -54,7 +54,6 @@ select biz = '91848182522' from foo.bar \G # Rows examine 100 61.48k 61.48k 61.48k 61.48k 61.48k 0 61.48k # Merge passes 0 0 0 0 0 0 0 0 # Query size 25 129 129 129 129 129 0 129 -# InnoDB: # Boolean: # Full scan 100% yes, 0% no # String: diff --git a/t/pt-query-digest/samples/slow002_report.txt b/t/pt-query-digest/samples/slow002_report.txt index 50d6b4b3..0426d96a 100644 --- a/t/pt-query-digest/samples/slow002_report.txt +++ b/t/pt-query-digest/samples/slow002_report.txt @@ -11,7 +11,6 @@ # Rows examine 100 61.48k 61.48k 61.48k 61.48k 61.48k 0 61.48k # Merge passes 0 0 0 0 0 0 0 0 # Query size 25 129 129 129 129 129 0 129 -# InnoDB: # Boolean: # Full scan 100% yes, 0% no # String: @@ -253,7 +252,6 @@ VALUES ('', 'Exact')\G # Rows examine 0 0 0 0 0 0 0 0 # Merge passes 0 0 0 0 0 0 0 0 # Query size 0 5 5 5 5 5 0 5 -# InnoDB: # String: # Hosts # Users [SQL_SLAVE] diff --git a/t/pt-query-digest/samples/slow035.txt b/t/pt-query-digest/samples/slow035.txt index d65d9a6a..c337879d 100644 --- a/t/pt-query-digest/samples/slow035.txt +++ b/t/pt-query-digest/samples/slow035.txt @@ -66,7 +66,6 @@ INSERT INTO db.v (m, b) VALUES ('', 'Exact')\G # Rows sent 0 0 0 0 0 0 0 0 # Rows examine 0 0 0 0 0 0 0 0 # Query size 52 48 48 48 48 48 0 48 -# InnoDB: # String: # Hosts # Users [SQL_SLAVE] From 06ce6e012e4b683249ed160482eceb380e5e0e0e Mon Sep 17 00:00:00 2001 From: Daniel Nichter Date: Fri, 18 Jan 2013 17:44:07 -0700 Subject: [PATCH 24/34] Fix --resume. 
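The resume position is now kept in a per-file offset file named after the
input file, instead of a single shared file under the temp directory. A
sketch of the intended workflow (slow.log is a placeholder file name):

  pt-query-digest --resume slow.log   # saves the last byte offset to slow.log.resume
  # later, re-run with the exact same file name:
  pt-query-digest --resume slow.log   # seeks to the saved offset and continues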
--- bin/pt-query-digest | 167 +++++++++++++++++++++++++++----------------- bin/pt-table-usage | 15 ++-- lib/Pipeline.pm | 13 ++-- t/lib/Pipeline.t | 2 +- 4 files changed, 118 insertions(+), 79 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 7d4fa6b3..6e98a7d7 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -12039,9 +12039,7 @@ sub add { push @{$self->{procs}}, $process; push @{$self->{names}}, $name; - if ( my $n = $args{retry_on_error} ) { - $self->{retries}->{$name} = $n; - } + $self->{retries}->{$name} = $args{retry_on_error} || 100; if ( $self->{instrument} ) { $self->{instrumentation}->{$name} = { time => 0, calls => 0 }; } @@ -12110,7 +12108,11 @@ sub execute { my $msg = "Pipeline process " . ($procno + 1) . " ($name) caused an error: " . $EVAL_ERROR; - if ( defined $self->{retries}->{$name} ) { + if ( !$self->{continue_on_error} ) { + die $msg . "Terminating pipeline because --continue-on-error " + . "is false.\n"; + } + elsif ( defined $self->{retries}->{$name} ) { my $n = $self->{retries}->{$name}; if ( $n ) { warn $msg . "Will retry pipeline process $procno ($name) " @@ -12122,9 +12124,6 @@ sub execute { . "($name) caused too many errors.\n"; } } - elsif ( !$self->{continue_on_error} ) { - die $msg; - } else { warn $msg; } @@ -13483,22 +13482,37 @@ sub _d { # ########################################################################### package pt_query_digest; +use strict; +use warnings FATAL => 'all'; use English qw(-no_match_vars); -use Time::Local qw(timelocal); -use Time::HiRes qw(time usleep); -use List::Util qw(max); -use POSIX qw(signal_h); -use File::Spec; -use Data::Dumper; -$Data::Dumper::Indent = 1; -$OUTPUT_AUTOFLUSH = 1; +use constant PTDEBUG => $ENV{PTDEBUG} || 0; -Transformers->import(qw(shorten micro_t percentage_of ts make_checksum - any_unix_timestamp parse_timestamp unix_timestamp crc32)); +use Time::Local qw(timelocal); +use Time::HiRes qw(time usleep); +use List::Util qw(max); +use Scalar::Util qw(looks_like_number); +use POSIX qw(signal_h); +use Data::Dumper; use Percona::Toolkit; -use JSONReportFormatter; -use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +$OUTPUT_AUTOFLUSH = 1; + +Transformers->import(qw( + shorten + micro_t + percentage_of + ts + make_checksum + any_unix_timestamp + parse_timestamp + unix_timestamp + crc32 +)); use sigtrap 'handler', \&sig_int, 'normal-signals'; @@ -13508,13 +13522,15 @@ my $ep_dbh; # For --explain my $ps_dbh; # For Processlist my $aux_dbh; # For --aux-dsn (--since/--until "MySQL expression") -my %resume; -my $save_resume = undef; -my $resume_file = File::Spec->catfile(File::Spec->tmpdir(), 'pt-query-digest-resume'); +my $resume_file; +my $offset; sub main { - local @ARGV = @_; # set global ARGV for this package - $oktorun = 1; # reset between tests else pipeline won't run + # Reset global vars, else tests will fail. + local @ARGV = @_; + $oktorun = 1; + $resume_file = undef; + $offset = undef; # ########################################################################## # Get configuration information. @@ -13597,8 +13613,6 @@ sub main { $o->usage_or_errors(); - $save_resume = $o->get('resume'); - # ######################################################################## # Common modules. 
# ####################################################################### @@ -13748,16 +13762,6 @@ sub main { ); } } - - if ( $o->get('resume') ) { - if (open my $resume_fh, q{<}, $resume_file) { - while (my $line = <$resume_fh>) { - chomp $line; - my ($file, $pos) = $line =~ m/\A(.+)\t([0-9]+)\z/; - $resume{$file} = $pos; - } - } - } # ######################################################################## # Create all the pipeline processes that do all the work: get input, @@ -13807,7 +13811,7 @@ sub main { } # prep { # input - my $fi = new FileIterator(resume => \%resume); + my $fi = FileIterator->new(); my $next_file = $fi->get_file_itr(@ARGV); my $input_fh; # the current input fh my $pr; # Progress obj for ^ @@ -13816,20 +13820,52 @@ sub main { name => 'input', process => sub { my ( $args ) = @_; + # Only get the next file when there's no fh or no more events in # the current fh. This allows us to do collect-and-report cycles # (i.e. iterations) on huge files. This doesn't apply to infinite # inputs because they don't set more_events false. if ( !$args->{input_fh} || !$args->{more_events} ) { + + # Close the current file. if ( $args->{input_fh} ) { close $args->{input_fh} or die "Cannot close input fh: $OS_ERROR"; } + + # Open the next file. my ($fh, $filename, $filesize) = $next_file->(); if ( $fh ) { PTDEBUG && _d('Reading', $filename); + PTDEBUG && _d('File size:', $filesize); push @read_files, $filename || "STDIN"; + # Read the file offset for --resume. + if ( $o->get('resume') && $filename ) { + $resume_file = $filename . '.resume'; + if ( -f $resume_file ) { + open my $resume_fh, '<', $resume_file + or die "Error opening $resume_file: $OS_ERROR"; + chomp(my $resume_offset = <$resume_fh>); + close $resume_fh + or die "Error close $resume_file: $OS_ERROR"; + if ( !looks_like_number($resume_offset) ) { + die "Offset $resume_offset in $resume_file " + . "does not look like a number.\n"; + } + PTDEBUG && _d('Resuming at offset', $resume_offset); + seek $fh, $resume_offset, 0 + or die "Error seeking to $resume_offset in " + . "$resume_file: $OS_ERROR"; + warn "Resuming $filename from offset $resume_offset " + . "(file size: $filesize)...\n"; + } + else { + PTDEBUG && _d('Not resuming', $filename, 'because', + $resume_file, 'does not exist'); + } + } + # Create callback to read next event. Some inputs, like # Processlist, may use something else but most next_event. if ( my $read_time = $o->get('read-timeout') ) { @@ -13842,11 +13878,11 @@ sub main { $args->{filename} = $filename; $args->{input_fh} = $fh; $args->{tell} = sub { - my $pos = tell $fh; + $offset = tell $fh; # update global $offset if ( $args->{filename} ) { - $args->{pos_for}->{$args->{filename}} = $pos; + $args->{pos_for}->{$args->{filename}} = $offset; } - return $pos; + return $offset; # legacy: return global $offset }; $args->{more_events} = 1; @@ -14352,10 +14388,6 @@ sub main { # we may just be between iters. 
$args->{Runtime}->reset(); $args->{time_left} = undef; - - if ( $args->{filename} ) { - $resume{$args->{filename}} = $args->{pos_for}->{$args->{filename}}; - } } # Continue the pipeline even if we reported and went to the next @@ -14728,6 +14760,8 @@ sub main { } PTDEBUG && _d("Pipeline data:", Dumper($pipeline_data)); + save_resume_offset(); + # Disconnect all open $dbh's map { $dp->disconnect($_); @@ -14736,8 +14770,6 @@ sub main { grep { $_ } ($qv_dbh, $qv_dbh2, $ps_dbh, $ep_dbh, $aux_dbh); - save_resume_data(); - return 0; } # End main() @@ -14880,29 +14912,16 @@ sub print_reports { return; } -sub save_resume_data { - return unless $save_resume; - return unless %resume; - if ( open my $resume_fh, q{>}, $resume_file ) { - while ( my ($k, $v) = each %resume ) { - print { $resume_fh } "$k\t$v\n"; - } - close $resume_fh; - } -} - # Catches signals so we can exit gracefully. sub sig_int { my ( $signal ) = @_; - - save_resume_data(); - if ( $oktorun ) { print STDERR "# Caught SIG$signal.\n"; $oktorun = 0; } else { print STDERR "# Exiting on SIG$signal.\n"; + save_resume_offset(); exit(1); } } @@ -15148,6 +15167,23 @@ sub verify_run_time { return $boundary; } +sub save_resume_offset { + if ( !$resume_file || !$offset ) { + PTDEBUG && _d('Not saving resume offset because there is no ' + . 'resume file or offset:', $resume_file, $offset); + return; + } + + PTDEBUG && _d('Saving resume at offset', $offset, 'to', $resume_file); + open my $resume_fh, '>', $resume_file + or die "Error opening $resume_file: $OS_ERROR"; + print { $resume_fh } $offset, "\n"; + close $resume_fh + or die "Error close $resume_file: $OS_ERROR"; + warn "\n# Saved resume file offset $offset to $resume_file\n"; + return; +} + sub _d { my ($package, undef, $line) = caller 0; @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } @@ -15697,7 +15733,9 @@ first option on the command line. default: yes -Continue parsing even if there is an error. +Continue parsing even if there is an error. The tool will not continue +forever: it stops once any process causes 100 errors, in which case there +is probably a bug in the tool or the input is invalid. =item --create-review-history-table @@ -16195,8 +16233,11 @@ See L<"OUTPUT"> for more information. =item --resume -If enabled, the tool will save the furthest it got into the log before exiting; -Future runs on that log with --resume enabled will start from that position. +Resume parsing from the last file offset. When specified, the tool +writes the last file offset to C where C is the original +file name given on the command line. When ran again with the exact same +file name, the tool reads the last file offset from C, +seeks to that position in the file, and resuming parsing events. =item --review diff --git a/bin/pt-table-usage b/bin/pt-table-usage index 69b8c891..f972cbd9 100755 --- a/bin/pt-table-usage +++ b/bin/pt-table-usage @@ -5345,7 +5345,7 @@ sub new { } my $self = { - instrument => 0, + instrument => PTDEBUG, continue_on_error => 0, %args, @@ -5372,9 +5372,7 @@ sub add { push @{$self->{procs}}, $process; push @{$self->{names}}, $name; - if ( my $n = $args{retry_on_error} ) { - $self->{retries}->{$name} = $n; - } + $self->{retries}->{$name} = $args{retry_on_error} || 100; if ( $self->{instrument} ) { $self->{instrumentation}->{$name} = { time => 0, calls => 0 }; } @@ -5443,7 +5441,11 @@ sub execute { my $msg = "Pipeline process " . ($procno + 1) . " ($name) caused an error: " . 
$EVAL_ERROR; - if ( defined $self->{retries}->{$name} ) { + if ( !$self->{continue_on_error} ) { + die $msg . "Terminating pipeline because --continue-on-error " + . "is false.\n"; + } + elsif ( defined $self->{retries}->{$name} ) { my $n = $self->{retries}->{$name}; if ( $n ) { warn $msg . "Will retry pipeline process $procno ($name) " @@ -5455,9 +5457,6 @@ sub execute { . "($name) caused too many errors.\n"; } } - elsif ( !$self->{continue_on_error} ) { - die $msg; - } else { warn $msg; } diff --git a/lib/Pipeline.pm b/lib/Pipeline.pm index ecea20bf..fcb47226 100644 --- a/lib/Pipeline.pm +++ b/lib/Pipeline.pm @@ -71,9 +71,7 @@ sub add { push @{$self->{procs}}, $process; push @{$self->{names}}, $name; - if ( my $n = $args{retry_on_error} ) { - $self->{retries}->{$name} = $n; - } + $self->{retries}->{$name} = $args{retry_on_error} || 100; if ( $self->{instrument} ) { $self->{instrumentation}->{$name} = { time => 0, calls => 0 }; } @@ -163,7 +161,11 @@ sub execute { my $msg = "Pipeline process " . ($procno + 1) . " ($name) caused an error: " . $EVAL_ERROR; - if ( defined $self->{retries}->{$name} ) { + if ( !$self->{continue_on_error} ) { + die $msg . "Terminating pipeline because --continue-on-error " + . "is false.\n"; + } + elsif ( defined $self->{retries}->{$name} ) { my $n = $self->{retries}->{$name}; if ( $n ) { warn $msg . "Will retry pipeline process $procno ($name) " @@ -175,9 +177,6 @@ sub execute { . "($name) caused too many errors.\n"; } } - elsif ( !$self->{continue_on_error} ) { - die $msg; - } else { warn $msg; } diff --git a/t/lib/Pipeline.t b/t/lib/Pipeline.t index 6cb5f0c3..dc21af96 100644 --- a/t/lib/Pipeline.t +++ b/t/lib/Pipeline.t @@ -261,7 +261,7 @@ $pipeline->add( ); $output = output( - sub { $pipeline->execute(%args) }, + sub {$pipeline->execute(%args); }, stderr => 1, ); From 9dd124038bf14101c788332e95682839272d9099 Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Mon, 21 Jan 2013 12:40:46 -0300 Subject: [PATCH 25/34] pqd: Merge --review-history into --review, added --review-table & --history-table --- bin/pt-query-digest | 356 +++++++++++++++++------------- lib/QueryReview.pm | 4 +- t/lib/QueryReview.t | 2 - t/pt-query-digest/option_sanity.t | 37 +++- t/pt-query-digest/review.t | 45 ++-- 5 files changed, 254 insertions(+), 190 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 6e98a7d7..ec87793d 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -9132,7 +9132,7 @@ sub new { sub set_history_options { my ( $self, %args ) = @_; - foreach my $arg ( qw(table dbh tbl_struct col_pat) ) { + foreach my $arg ( qw(table tbl_struct col_pat) ) { die "I need a $arg argument" unless $args{$arg}; } @@ -9166,7 +9166,7 @@ sub set_history_options { } @cols) . ')'; PTDEBUG && _d($sql); - $self->{history_sth} = $args{dbh}->prepare($sql); + $self->{history_sth} = $self->{dbh}->prepare($sql); $self->{history_metrics} = \@metrics; return; @@ -13525,6 +13525,8 @@ my $aux_dbh; # For --aux-dsn (--since/--until "MySQL expression") my $resume_file; my $offset; +(my $tool = __PACKAGE__) =~ tr/_/-/; + sub main { # Reset global vars, else tests will fail. local @ARGV = @_; @@ -13555,11 +13557,6 @@ sub main { } if ( !$o->get('help') ) { - if ( $review_dsn - && (!defined $review_dsn->{D} || !defined $review_dsn->{t}) ) { - $o->save_error('The --review DSN requires a D (database) and t' - . 
' (table) part specifying the query review table'); - } if ( $o->get('outliers') && grep { $_ !~ m/^\w+:[0-9.]+(?::[0-9.]+)?$/ } @{$o->get('outliers')} ) { @@ -13573,6 +13570,20 @@ sub main { } } + if ( my $review_dsn = $o->get('review') ) { + $o->save_error('--review does not accept a t option. Perhaps you meant ' + . 'to use --review-table or --history-table?') + if defined $review_dsn->{t}; + } + + for my $tables ('review-table', 'history-table') { + my $got = $o->get($tables); + if ( grep !defined, Quoter->split_unquote($got) ) { + $o->save_error("$tables should be passed a " + . "fully-qualified table name, got $got"); + } + } + if ( my $patterns = $o->get('embedded-attributes') ) { $o->save_error("--embedded-attributes should be passed two " . "comma-separated patterns, got " . scalar(@$patterns) ) @@ -13642,11 +13653,10 @@ sub main { } # ######################################################################## - # Set up for --review and --review-history. + # Set up for --review. # ######################################################################## my $qv; # QueryReview my $qv_dbh; # For QueryReview - my $qv_dbh2; # For QueryReview and --review-history if ( $review_dsn ) { my $tp = new TableParser(Quoter => $q); $qv_dbh = get_cxn( @@ -13657,28 +13667,33 @@ sub main { opts => { AutoCommit => 1 }, ); $qv_dbh->{InactiveDestroy} = 1; # Don't die on fork(). - my @db_tbl = @{$review_dsn}{qw(D t)}; - my $db_tbl = $q->quote(@db_tbl); - # Create the review table if desired - if ( $o->get('create-review-table') ) { - my $sql = $o->read_para_after( - __FILE__, qr/MAGIC_create_review/); - $sql =~ s/query_review/IF NOT EXISTS $db_tbl/; - PTDEBUG && _d($sql); - $qv_dbh->do($sql); - } + my @db_tbl = Quoter->split_unquote($o->get('review-table')); + my @hdb_tbl = Quoter->split_unquote($o->get('history-table')); - # Check for the existence of the table. - if ( !$tp->check_table( - dbh => $qv_dbh, - db => $db_tbl[0], - tbl => $db_tbl[1]) ) - { - die "The query review table $db_tbl does not exist. " - . "Specify --create-review-table to create it, " - . "and ensure that the MySQL user has privileges to create " - . "and update the table.\n"; + my $db_tbl = $q->quote(@db_tbl); + my $hdb_tbl = $q->quote(@hdb_tbl); + + my $create_review_sql = $o->read_para_after( + __FILE__, qr/MAGIC_create_review/); + $create_review_sql =~ s/query_review/IF NOT EXISTS $db_tbl/; + + my $create_history_sql = $o->read_para_after( + __FILE__, qr/MAGIC_create_review_history/); + $create_history_sql =~ s/query_review_history/IF NOT EXISTS $hdb_tbl/; + + for my $create ( + [ $db_tbl, $create_review_sql ], + [ $hdb_tbl, $create_history_sql ], + ) { + my ($tbl_name, $sql) = @$create; + create_review_tables( + dbh => $qv_dbh, + full_table => $tbl_name, + create_table_sql => $sql, + create_table => $o->get('create-review-tables'), + TableParser => $tp, + ); } # Set up the new QueryReview object. @@ -13690,77 +13705,41 @@ sub main { quoter => $q, ); - # Set up the review-history table - if ( my $review_history_dsn = $o->get('review-history') ) { - $qv_dbh2 = get_cxn( - for => '--review-history', - dsn => $review_history_dsn, - OptionParser => $o, - DSNParser => $dp, - opts => { AutoCommit => 1 }, - ); - $qv_dbh2->{InactiveDestroy} = 1; # Don't die on fork(). 
- my @hdb_tbl = @{$o->get('review-history')}{qw(D t)}; - my $hdb_tbl = $q->quote(@hdb_tbl); - - # Create the review-history table if desired - if ( $o->get('create-review-history-table') ) { - my $sql = $o->read_para_after( - __FILE__, qr/MAGIC_create_review_history/); - $sql =~ s/query_review_history/IF NOT EXISTS $hdb_tbl/; - PTDEBUG && _d($sql); - $qv_dbh2->do($sql); - } - - # Check for the existence of the table. - if ( !$tp->check_table( - dbh => $qv_dbh2, - db => $hdb_tbl[0], - tbl => $hdb_tbl[1]) ) - { - die "The query review history table $hdb_tbl does not exist. " - . "Specify --create-review-history-table to create it, " - . "and ensure that the MySQL user has privileges to create " - . "and update the table.\n"; - } - - # Inspect for MAGIC_history_cols. Add them to the --select list - # only if an explicit --select list was given. Otherwise, leave - # --select undef which will cause EventAggregator to aggregate every - # attribute available which will include the history columns. - # If no --select list was given and we make one by adding the history - # columsn to it, then EventAggregator will only aggregate the - # history columns and nothing else--we don't want this. - my $tbl = $tp->parse($tp->get_create_table($qv_dbh2, @hdb_tbl)); - my $pat = $o->read_para_after(__FILE__, qr/MAGIC_history_cols/); - $pat =~ s/\s+//g; - $pat = qr/^(.*?)_($pat)$/; - # Get original --select values. - my %select = map { $_ => 1 } @{$o->get('select')}; - foreach my $col ( @{$tbl->{cols}} ) { - my ( $attr, $metric ) = $col =~ m/$pat/; - next unless $attr && $metric; - $attr = ucfirst $attr if $attr =~ m/_/; # TableParser lowercases - # Add history table values to original select values. - $select{$attr}++; - } - - if ( $o->got('select') ) { - # Re-set --select with its original values plus the history - # table values. - $o->set('select', [keys %select]); - PTDEBUG && _d("--select after parsing --review-history table:", - @{$o->get('select')}); - } - - # And tell the QueryReview that it has more work to do. - $qv->set_history_options( - table => $hdb_tbl, - dbh => $qv_dbh2, - tbl_struct => $tbl, - col_pat => $pat, - ); + # Inspect for MAGIC_history_cols. Add them to the --select list + # only if an explicit --select list was given. Otherwise, leave + # --select undef which will cause EventAggregator to aggregate every + # attribute available which will include the history columns. + # If no --select list was given and we make one by adding the history + # columsn to it, then EventAggregator will only aggregate the + # history columns and nothing else--we don't want this. + my $tbl = $tp->parse($tp->get_create_table($qv_dbh, @hdb_tbl)); + my $pat = $o->read_para_after(__FILE__, qr/MAGIC_history_cols/); + $pat =~ s/\s+//g; + $pat = qr/^(.*?)_($pat)$/; + # Get original --select values. + my %select = map { $_ => 1 } @{$o->get('select')}; + foreach my $col ( @{$tbl->{cols}} ) { + my ( $attr, $metric ) = $col =~ $pat; + next unless $attr && $metric; + $attr = ucfirst $attr if $attr =~ m/_/; # TableParser lowercases + # Add history table values to original select values. + $select{$attr}++; } + + if ( $o->got('select') ) { + # Re-set --select with its original values plus the history + # table values. + $o->set('select', [sort keys %select]); + PTDEBUG && _d("--select after parsing the history table:", + @{$o->get('select')}); + } + + # And tell the QueryReview that it has more work to do. 
+ $qv->set_history_options( + table => $hdb_tbl, + tbl_struct => $tbl, + col_pat => $pat, + ); } # ######################################################################## @@ -14152,7 +14131,7 @@ sub main { ); $aux_dbh->{InactiveDestroy} = 1; # Don't die on fork(). } - $aux_dbh ||= $qv_dbh || $qv_dbh2 || $ps_dbh || $ep_dbh; + $aux_dbh ||= $qv_dbh || $ps_dbh || $ep_dbh; PTDEBUG && _d('aux dbh:', $aux_dbh); my $time_callback = sub { @@ -14768,7 +14747,7 @@ sub main { PTDEBUG && _d('Disconnected dbh', $_); } grep { $_ } - ($qv_dbh, $qv_dbh2, $ps_dbh, $ep_dbh, $aux_dbh); + ($qv_dbh, $ps_dbh, $ep_dbh, $aux_dbh); return 0; } # End main() @@ -14777,6 +14756,77 @@ sub main { # Subroutines. # ############################################################################ +sub create_review_tables { + my ( %args ) = @_; + my @required_args = qw(dbh full_table TableParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $create_table_sql = $args{create_table_sql}; + my ($dbh, $full_table, $tp) = @args{@required_args}; + + PTDEBUG && _d('Checking --review table', $full_table); + + # If the repl db doesn't exit, auto-create it, maybe. + my ($db, $tbl) = Quoter->split_unquote($full_table); + my $show_db_sql = qq{SHOW DATABASES LIKE '$db'}; + PTDEBUG && _d($show_db_sql); + my @db_exists = $dbh->selectrow_array($show_db_sql); + if ( !@db_exists && !$args{create_table} ) { + die "--review database $db does not exist and " + . "--no-create-review-tables was specified. You need " + . "to create the database.\n"; + } + else { + # Even if the db already exists, do this in case it does not exist + # on a slave. + my $create_db_sql + = "CREATE DATABASE IF NOT EXISTS " + . Quoter->quote($db) + . " /* $tool */"; + PTDEBUG && _d($create_db_sql); + eval { + $dbh->do($create_db_sql); + }; + if ( $EVAL_ERROR && !@db_exists ) { + warn $EVAL_ERROR; + die "--review database $db does not exist and it cannot be " + . "created automatically. You need to create the database.\n"; + } + } + + # USE the correct db + my $sql = "USE " . Quoter->quote($db); + PTDEBUG && _d($sql); + $dbh->do($sql); + + # Check if the table exists; if not, create it, maybe. + my $tbl_exists = $tp->check_table( + dbh => $dbh, + db => $db, + tbl => $tbl, + ); + + PTDEBUG && _d('Table exists: ', $tbl_exists ? 'yes' : 'no'); + + if ( !$tbl_exists && !$args{create_table} ) { + die "Table $full_table does not exist and " + . "--no-create-review-tables was specified. " + . "You need to create the table.\n"; + } + else { + PTDEBUG && _d($dbh, $create_table_sql); + eval { + $dbh->do($create_table_sql); + }; + if ( $EVAL_ERROR && !$args{create_table} ) { + warn $EVAL_ERROR; + die "--review history table $full_table does not exist and it cannot be " + . "created automatically. You need to create the table.\n" + } + } +} + # TODO: This sub is poorly named since it does more than print reports: # it aggregates, reports, does QueryReview stuff, etc. 
sub print_reports { @@ -15086,17 +15136,15 @@ sub update_query_review_tables { first_seen => $stats->{ts}->{min}, last_seen => $stats->{ts}->{max} ); - if ( $o->get('review-history') ) { - my %history; - foreach my $attrib ( @$attribs ) { - $history{$attrib} = $ea->metrics( - attrib => $attrib, - where => $item, - ); - } - $qv->set_review_history( - $item, $sample->{arg} || '', %history); + my %history; + foreach my $attrib ( @$attribs ) { + $history{$attrib} = $ea->metrics( + attrib => $attrib, + where => $item, + ); } + $qv->set_review_history( + $item, $sample->{arg} || '', %history); } return; @@ -15540,9 +15588,9 @@ example, You can see how useful this meta-data is -- as you analyze your queries, you get your comments integrated right into the report. -If you add the L<"--review-history"> option, it will also store information into -a separate database table, so you can keep historical trending information on -classes of queries. +The tool will also store information into a separate database table specified +by the L<"--history-table"> option, so you can keep historical trending information +on classes of queries. =back @@ -15646,9 +15694,6 @@ Collapse multiple identical UNION queries into a single one. =head1 OPTIONS -DSN values in L<"--review-history"> default to values in L<"--review"> if COPY -is yes. - This tool accepts additional command-line arguments. Refer to the L<"SYNOPSIS"> and usage information for details. @@ -15737,19 +15782,13 @@ Continue parsing even if there is an error. The tool will not continue forever: it stops once any process causes 100 errors, in which case there is probably a bug in the tool or the input is invalid. -=item --create-review-history-table +=item --create-review-tables -Create the L<"--review-history"> table if it does not exist. +Create the L<"--review"> tables if they do not exist. -This option causes the table specified by L<"--review-history"> to be created -with the default structure shown in the documentation for that option. - -=item --create-review-table - -Create the L<"--review"> table if it does not exist. - -This option causes the table specified by L<"--review"> to be created with the -default structure shown in the documentation for that option. +This option causes the tables specified by L<"--review-table"> and +L<"--history-table"> to be created with the default structures shown +in the documentation for L<"--review">. =item --daemonize @@ -15967,6 +16006,12 @@ L<"ATTRIBUTES">). Show help and exit. +=item --history-table + +type: string; default: percona_schema.query_history + +Where to save the historical data produced by L<"--review">. + =item --host short form: -h; type: string @@ -16243,12 +16288,15 @@ seeks to that position in the file, and resuming parsing events. type: DSN -Store a sample of each class of query in this DSN. +Store a sample of each class of query in this DSN, plus historical values +for review trend analysis The argument specifies a table to store all unique query fingerprints in. The table must have at least the following columns. You can add more columns for your own special purposes, but they won't be used by pt-query-digest. The following CREATE TABLE definition is also used for L<"--create-review-table">. + +=for comment ignore-pt-internal-value MAGIC_create_review: CREATE TABLE query_review ( @@ -16283,23 +16331,12 @@ After parsing and aggregating events, your table should contain a row for each fingerprint. This option depends on C<--group-by fingerprint> (which is the default). 
It will not work otherwise. -=item --review-history -type: DSN +Additionally, pt-query-digest will save information into a review table, +so you can see how classes of queries have changed over time. You can +change the destination table with the L<"--history-table"> -The table in which to store historical values for review trend analysis. - -Each time you review queries with L<"--review">, pt-query-digest will save -information into this table so you can see how classes of queries have changed -over time. - -This DSN inherits unspecified values from L<"--review">. It should mention a -table in which to store statistics about each class of queries. pt-query-digest -verifies the existence of the table, and your privileges to insert, delete and -update on that table. - -pt-query-digest then inspects the columns in the table. The table must have at -least the following columns: +The table must have at least the following columns: CREATE TABLE query_review_history ( checksum BIGINT UNSIGNED NOT NULL, @@ -16308,7 +16345,10 @@ least the following columns: Any columns not mentioned above are inspected to see if they follow a certain naming convention. The column is special if the name ends with an underscore -followed by any of these MAGIC_history_cols values: +followed by any of these values: + +=for comment ignore-pt-internal-value +MAGIC_history_cols pct|avt|cnt|sum|min|max|pct_95|stddev|median|rank @@ -16324,8 +16364,11 @@ columns and making them part of the primary key along with the checksum. But you could also just add a ts_min column and make it a DATE type, so you'd get one row per class of queries per day. -The default table structure follows. The following MAGIC_create_review_history -table definition is used for L<"--create-review-history-table">: +The default table structure follows. The following table definition is used +for L<"--create-review-tables">: + +=for comment ignore-pt-internal-value +MAGIC_create_review_history CREATE TABLE query_review_history ( checksum BIGINT UNSIGNED NOT NULL, @@ -16431,6 +16474,12 @@ table definition is used for L<"--create-review-history-table">: Note that we store the count (cnt) for the ts attribute only; it will be redundant to store this for other attributes. +=item --review-table + +type: string; default: percona_schema.query_review + +Where to save the samples produced by L<"--review">. + =item --run-time type: time @@ -16533,7 +16582,7 @@ Previously, pt-query-digest only aggregated these attributes: Query_time,Lock_time,Rows_sent,Rows_examined,user,db:Schema,ts -Attributes specified in the L<"--review-history"> table will always be selected +Attributes in the table specified by L<"--history-table"> will always be selected even if you do not specify L<"--select">. See also L<"--ignore-attributes"> and L<"ATTRIBUTES">. @@ -16595,9 +16644,9 @@ several types: CURRENT_DATE - INTERVAL 7 DAY If you give a MySQL time expression, then you must also specify a DSN -so that pt-query-digest can connect to MySQL to evaluate the expression. If you -specify L<"--explain">, L<"--processlist">, L<"--review"> -or L<"--review-history">, then one of these DSNs will be used automatically. +so that pt-query-digest can connect to MySQL to evaluate the expression. +If you specify L<"--explain">, L<"--processlist">, L<"--review">, then +one of these DSNs will be used automatically. Otherwise, you must specify an L<"--aux-dsn"> or pt-query-digest will die saying that the value is invalid. @@ -16917,7 +16966,8 @@ Default character set. 
dsn: database; copy: yes -Database that contains the query review table. +Default database for the review option. Only useful if there are replication +filters set up. =item * F @@ -16951,7 +17001,7 @@ Socket file to use for connection. =item * t -Table to use as the query review table. +Not used. =item * u diff --git a/lib/QueryReview.pm b/lib/QueryReview.pm index aad98d8d..699aae6c 100644 --- a/lib/QueryReview.pm +++ b/lib/QueryReview.pm @@ -111,7 +111,7 @@ sub new { # table. sub set_history_options { my ( $self, %args ) = @_; - foreach my $arg ( qw(table dbh tbl_struct col_pat) ) { + foreach my $arg ( qw(table tbl_struct col_pat) ) { die "I need a $arg argument" unless $args{$arg}; } @@ -157,7 +157,7 @@ sub set_history_options { } @cols) . ')'; PTDEBUG && _d($sql); - $self->{history_sth} = $args{dbh}->prepare($sql); + $self->{history_sth} = $self->{dbh}->prepare($sql); $self->{history_metrics} = \@metrics; return; diff --git a/t/lib/QueryReview.t b/t/lib/QueryReview.t index c5bba0c1..950953d4 100644 --- a/t/lib/QueryReview.t +++ b/t/lib/QueryReview.t @@ -161,7 +161,6 @@ my $hist_struct = $tp->parse( $qv->set_history_options( table => 'test.query_review_history', - dbh => $dbh, quoter => $q, tbl_struct => $hist_struct, col_pat => qr/^(.*?)_($pat)$/, @@ -257,7 +256,6 @@ $hist_struct = $tp->parse( $tp->get_create_table($dbh, 'test', 'query_review_history')); $qv->set_history_options( table => 'test.query_review_history', - dbh => $dbh, quoter => $q, tbl_struct => $hist_struct, col_pat => qr/^(.*?)_($pat)$/, diff --git a/t/pt-query-digest/option_sanity.t b/t/pt-query-digest/option_sanity.t index a7eb4956..b418ff68 100644 --- a/t/pt-query-digest/option_sanity.t +++ b/t/pt-query-digest/option_sanity.t @@ -13,14 +13,29 @@ use Test::More; use PerconaTest; +my $cmd = "$trunk/bin/pt-query-digest"; +my $help = qx{$cmd --help}; + # ############################################################################# # Test cmd line op sanity. 
# ############################################################################# -my $output = `$trunk/bin/pt-query-digest --review h=127.1,P=12345,u=msandbox,p=msandbox`; -like($output, qr/--review DSN requires a D/, 'Dies if no D part in --review DSN'); +my $output = `$cmd --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test`; +like($output, qr/--review-table requires a fully/, 'Dies if no database part in --review-table'); -$output = `$trunk/bin/pt-query-digest --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test`; -like($output, qr/--review DSN requires a D/, 'Dies if no t part in --review DSN'); +$output = `$cmd --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=test`; +like($output, qr/--review does not accept a t option/, 'Dies if t part in --review DSN'); + +like( + $help, + qr/review-table\s+\Qpercona_schema.query_review\E/, + "--review-table has a sane default" +); + +like( + $help, + qr/history-table\s+\Qpercona_schema.query_history\E/, + "--history-table has a sane default" +); # ############################################################################# # https://bugs.launchpad.net/percona-toolkit/+bug/885382 @@ -34,25 +49,25 @@ my @options = qw( --group-by file ); -$output = `$trunk/bin/pt-query-digest @options --embedded-attributes '-- .*' $sample.slow010.txt`; +$output = `$cmd @options --embedded-attributes '-- .*' $sample.slow010.txt`; like $output, qr/\Q--embedded-attributes should be passed two comma-separated patterns, got 1/, 'Bug 885382: --embedded-attributes cardinality'; -$output = `$trunk/bin/pt-query-digest @options --embedded-attributes '-- .*,(?{1234})' $sample.slow010.txt`; +$output = `$cmd @options --embedded-attributes '-- .*,(?{1234})' $sample.slow010.txt`; like $output, qr/\Q--embedded-attributes Eval-group /, "Bug 885382: --embedded-attributes rejects invalid patterns early"; -$output = `$trunk/bin/pt-query-digest @options --embedded-attributes '-- .*,(?*asdasd' $sample.slow010.txt`; +$output = `$cmd @options --embedded-attributes '-- .*,(?*asdasd' $sample.slow010.txt`; like $output, qr/\Q--embedded-attributes Sequence (?*...) not recognized/, "Bug 885382: --embedded-attributes rejects invalid patterns early"; -$output = `$trunk/bin/pt-query-digest @options --embedded-attributes '-- .*,[:alpha:]' $sample.slow010.txt`; +$output = `$cmd @options --embedded-attributes '-- .*,[:alpha:]' $sample.slow010.txt`; like $output, qr/\Q--embedded-attributes POSIX syntax [: :] belongs inside character/, @@ -61,7 +76,7 @@ like $output, # We removed --statistics, but they should still print out if we use PTDEBUG. 
-$output = qx{PTDEBUG=1 $trunk/bin/pt-query-digest --no-report ${sample}slow002.txt 2>&1}; +$output = qx{PTDEBUG=1 $cmd --no-report ${sample}slow002.txt 2>&1}; my $stats = slurp_file("t/pt-query-digest/samples/stats-slow002.txt"); like( @@ -81,10 +96,8 @@ like( # https://bugs.launchpad.net/percona-toolkit/+bug/831525 # ############################################################################# -$output = `$trunk/bin/pt-query-digest --help`; - like( - $output, + $help, qr/\Q--report-format=A\E\s* \QPrint these sections of the query analysis\E\s* \Qreport (default rusage,date,hostname,files,\E\s* diff --git a/t/pt-query-digest/review.t b/t/pt-query-digest/review.t index bf980c3d..8ad93f76 100644 --- a/t/pt-query-digest/review.t +++ b/t/pt-query-digest/review.t @@ -22,9 +22,6 @@ my $dbh = $sb->get_dbh_for('master'); if ( !$dbh ) { plan skip_all => 'Cannot connect to sandbox master'; } -else { - plan tests => 18; -} sub normalize_numbers { use Scalar::Util qw(looks_like_number); @@ -43,21 +40,21 @@ $sb->load_file('master', 't/pt-query-digest/samples/query_review.sql'); # Test --create-review and --create-review-history-table $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow006.txt --create-review-table --review " - . "h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review --create-review-history-table " - . "--review-history t=query_review_history"; +$cmd = "${run_with}slow006.txt --create-review-tables --review " + . "h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review " + . "--history-table test.query_review_history"; $output = `$cmd >/dev/null 2>&1`; my ($table) = $dbh->selectrow_array( "show tables from test like 'query_review'"); -is($table, 'query_review', '--create-review'); +is($table, 'query_review', '--create-review-tables'); ($table) = $dbh->selectrow_array( "show tables from test like 'query_review_history'"); -is($table, 'query_review_history', '--create-review-history-table'); +is($table, 'query_review_history', '--create-review-tables'); $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow006.txt --review h=127.1,u=msandbox,p=msandbox,P=12345,D=test,t=query_review " - . "--review-history t=query_review_history"; +$cmd = "${run_with}slow006.txt --review h=127.1,u=msandbox,p=msandbox,P=12345 --review-table test.query_review " + . "--history-table test.query_review_history"; $output = `$cmd`; my $res = $dbh->selectall_arrayref( 'SELECT * FROM test.query_review', { Slice => {} } ); @@ -181,17 +178,21 @@ is_deeply( # have been reviewed, the report should include both of them with # their respective query review info added to the report. ok( - no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review', "t/pt-query-digest/samples/slow006_AR_1.txt"), + no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review --create-review-tables', "t/pt-query-digest/samples/slow006_AR_1.txt"), 'Analyze-review pass 1 reports not-reviewed queries' ); +($table) = $dbh->selectrow_array( + "show tables from percona_schema like 'query_history'"); +is($table, 'query_history', '--create-review-tables creates both percona_schema and query_review_history'); + # Mark a query as reviewed and run --report again and that query should # not be reported. 
$dbh->do('UPDATE test.query_review SET reviewed_by="daniel", reviewed_on="2008-12-24 12:00:00", comments="foo_tbl is ok, so are cranberries" WHERE checksum=11676753765851784517'); ok( - no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review', "t/pt-query-digest/samples/slow006_AR_2.txt"), + no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review', "t/pt-query-digest/samples/slow006_AR_2.txt"), 'Analyze-review pass 2 does not report the reviewed query' ); @@ -199,7 +200,7 @@ ok( # to re-appear in the report with the reviewed_by, reviewed_on and comments # info included. ok( - no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review --report-all', "t/pt-query-digest/samples/slow006_AR_4.txt"), + no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review --report-all', "t/pt-query-digest/samples/slow006_AR_4.txt"), 'Analyze-review pass 4 with --report-all reports reviewed query' ); @@ -208,7 +209,7 @@ $dbh->do('ALTER TABLE test.query_review ADD COLUMN foo INT'); $dbh->do('UPDATE test.query_review SET foo=42 WHERE checksum=15334040482108055940'); ok( - no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review', "t/pt-query-digest/samples/slow006_AR_5.txt"), + no_diff($run_with.'slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review', "t/pt-query-digest/samples/slow006_AR_5.txt"), 'Analyze-review pass 5 reports new review info column' ); @@ -217,7 +218,7 @@ ok( $dbh->do("update test.query_review set first_seen='0000-00-00 00:00:00', " . " last_seen='0000-00-00 00:00:00'"); $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow022.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review"; +$cmd = "${run_with}slow022.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review"; $output = `$cmd`; unlike($output, qr/last_seen/, 'no last_seen when 0000 timestamp'); unlike($output, qr/first_seen/, 'no first_seen when 0000 timestamp'); @@ -231,7 +232,7 @@ unlike($output, qr/0000-00-00 00:00:00/, 'no 0000-00-00 00:00:00 timestamp'); # Make sure a missing Time property does not cause a crash. Don't test data # in table, because it varies based on when you run the test. $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow021.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review"; +$cmd = "${run_with}slow021.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review"; $output = `$cmd`; unlike($output, qr/Use of uninitialized value/, 'didnt crash due to undef ts'); @@ -239,7 +240,7 @@ unlike($output, qr/Use of uninitialized value/, 'didnt crash due to undef ts'); # crash. Don't test data in table, because it varies based on when you run # the test. $output = 'foo'; # clear previous test results -$cmd = "${run_with}slow022.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review"; +$cmd = "${run_with}slow022.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review"; $output = `$cmd`; # Don't test data in table, because it varies based on when you run the test. 
unlike($output, qr/Use of uninitialized value/, 'no crash due to totally missing ts'); @@ -248,7 +249,7 @@ unlike($output, qr/Use of uninitialized value/, 'no crash due to totally missing # --review --no-report # ############################################################################# $sb->load_file('master', 't/pt-query-digest/samples/query_review.sql'); -$output = `${run_with}slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review --no-report --create-review-table`; +$output = `${run_with}slow006.txt --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review --no-report --create-review-table`; $res = $dbh->selectall_arrayref('SELECT * FROM test.query_review'); is( $res->[0]->[1], @@ -268,7 +269,7 @@ is( $dbh->do('truncate table test.query_review'); $dbh->do('truncate table test.query_review_history'); -`${run_with}slow002.txt --review h=127.1,u=msandbox,p=msandbox,P=12345,D=test,t=query_review --review-history t=query_review_history --no-report --filter '\$event->{arg} =~ m/foo\.bar/' > /dev/null`; +`${run_with}slow002.txt --review h=127.1,u=msandbox,p=msandbox,P=12345 --review-table test.query_review --history-table test.query_review_history --no-report --filter '\$event->{arg} =~ m/foo\.bar/' > /dev/null`; $res = $dbh->selectall_arrayref( 'SELECT * FROM test.query_review_history', { Slice => {} } ); @@ -396,8 +397,9 @@ $dbh->do($min_tbl); $output = output( sub { pt_query_digest::main( - '--review', 'h=127.1,u=msandbox,p=msandbox,P=12345,D=test,t=query_review', - '--review-history', 't=query_review_history', + '--review', 'h=127.1,u=msandbox,p=msandbox,P=12345', + '--review-table', 'test.query_review', + '--history-table', 'test.query_review_history', qw(--no-report --no-continue-on-error), "$trunk/t/lib/samples/slow002.txt") }, @@ -415,4 +417,5 @@ unlike( # ############################################################################# $sb->wipe_clean($dbh); ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox"); +done_testing; exit; From 8c5d8e4d3d12b9ace62c327066142e7e299e9f54 Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Mon, 21 Jan 2013 13:20:10 -0300 Subject: [PATCH 26/34] t/pt-query-digest/issue_360.t: use --review-table --- t/pt-query-digest/issue_360.t | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/t/pt-query-digest/issue_360.t b/t/pt-query-digest/issue_360.t index 65069d3b..b3a1effc 100644 --- a/t/pt-query-digest/issue_360.t +++ b/t/pt-query-digest/issue_360.t @@ -34,11 +34,11 @@ else { my $pid_file = "/tmp/pt-query-digest-test-issue_360.t.$PID"; # Need a clean query review table. -$sb->create_dbs($dbh, [qw(test)]); +$sb->create_dbs($dbh, [qw(test percona_schema)]); # Run pt-query-digest in the background for 2s, # saving queries to test.query_review. -diag(`$trunk/bin/pt-query-digest --processlist h=127.1,P=12345,u=msandbox,p=msandbox --interval 0.01 --create-review-table --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=query_review --daemonize --pid $pid_file --log /dev/null --run-time 2`); +diag(`$trunk/bin/pt-query-digest --processlist h=127.1,P=12345,u=msandbox,p=msandbox --interval 0.01 --create-review-table --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test.query_review --daemonize --pid $pid_file --log /dev/null --run-time 2`); # Wait until its running. 
 PerconaTest::wait_for_files($pid_file);

From b21baeb5d8f80eb26da95dd0f21548e42feecfec Mon Sep 17 00:00:00 2001
From: Brian Fraser
Date: Tue, 22 Jan 2013 19:33:11 -0300
Subject: [PATCH 27/34] pqd: Changed --resume to take a filename, added tests

---
 bin/pt-query-digest        |  16 ++--
 t/pt-query-digest/resume.t | 163 +++++++++++++++++++++++++++++++++++++
 2 files changed, 171 insertions(+), 8 deletions(-)
 create mode 100644 t/pt-query-digest/resume.t

diff --git a/bin/pt-query-digest b/bin/pt-query-digest
index ec87793d..84366c4b 100755
--- a/bin/pt-query-digest
+++ b/bin/pt-query-digest
@@ -13820,9 +13820,8 @@ sub main {
       push @read_files, $filename || "STDIN";

       # Read the file offset for --resume.
-      if ( $o->get('resume') && $filename ) {
-         $resume_file = $filename . '.resume';
-         if ( -f $resume_file ) {
+      if ( ($resume_file = $o->get('resume')) && $filename ) {
+         if ( -s $resume_file ) {
            open my $resume_fh, '<', $resume_file
               or die "Error opening $resume_file: $OS_ERROR";
            chomp(my $resume_offset = <$resume_fh>);
@@ -16278,11 +16277,12 @@ See L<"OUTPUT"> for more information.

 =item --resume

-Resume parsing from the last file offset. When specified, the tool
-writes the last file offset to C where C is the original
-file name given on the command line. When ran again with the exact same
-file name, the tool reads the last file offset from C,
-seeks to that position in the file, and resuming parsing events.
+type: string
+
+If specified, the tool writes the last file offset, if there is one,
+to the given filename. When run again with the same value for this option,
+the tool reads the last file offset from the file, seeks to that position
+in the log, and resumes parsing events from that point onward.

 =item --review

diff --git a/t/pt-query-digest/resume.t b/t/pt-query-digest/resume.t
new file mode 100644
index 00000000..ccc2ca65
--- /dev/null
+++ b/t/pt-query-digest/resume.t
@@ -0,0 +1,163 @@
+#!/usr/bin/env perl
+
+BEGIN {
+   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
+      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
+   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
+};
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+use Test::More;
+
+use IO::File;
+use Fcntl qw(:seek);
+use File::Temp qw(tempfile);
+
+use PerconaTest;
+use Sandbox;
+require "$trunk/bin/pt-query-digest";
+
+my $dp = new DSNParser(opts=>$dsn_opts);
+my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
+my $dbh = $sb->get_dbh_for('master');
+
+if ( !$dbh ) {
+   plan skip_all => 'Cannot connect to sandbox master';
+}
+
+my $samples = "$trunk/t/lib/samples/slowlogs";
+my $output;
+
+$sb->create_dbs($dbh, ['test']);
+
+my $resume_file = (tempfile())[1];
+
+my ($fh, $filename) = tempfile(UNLINK => 1);
+$fh->autoflush(1);
+
+sub resume_offset_ok {
+   my ($resume_file, $file, $msg) = @_;
+   chomp(my $offset = slurp_file($resume_file));
+   open my $tmp_fh, q{<}, $file or die $OS_ERROR;
+   seek $tmp_fh, 0, SEEK_END;
+   is(tell($tmp_fh), $offset, $msg);
+}
+
+sub run_pqd {
+   my @extra_args = @_;
+   my $run = output(sub { pt_query_digest::main(qw(--limit 10), @extra_args, $filename) }, stderr => 1);
+   $run =~ s/\d+ms user time.+//;
+   $run =~ s/Current date: .+//;
+   return $run;
+}
+
+print { $fh } slurp_file("$samples/slow006.txt");
+
+my @runs;
+push @runs, run_pqd() for 1, 2;
+
+is($runs[0], $runs[1], "Sanity check: Behaves the same between runs without --resume");
+
+my @resume_runs;
+push @resume_runs, run_pqd('--resume', $resume_file) for 1, 2;
+
+(my $without_resume_line = $resume_runs[0]) =~ s/\n\n. Saved resume file offset.+//;
+is(
+   $runs[0],
+   $without_resume_line,
+   "First time with --resume just like the first time without"
+);
+
+like(
+   $resume_runs[0],
+   qr/\QSaved resume file offset\E/,
+   "Saves offset with --resume"
+);
+
+like(
+   $resume_runs[1],
+   qr/\QNo events processed.\E/,
+   "...and there are no events on the second run"
+);
+
+resume_offset_ok($resume_file, $filename, "The resume file has the correct offset");
+
+print { $fh } slurp_file("$samples/slow002.txt");
+
+push @resume_runs, run_pqd('--resume', $resume_file) for 1, 2;
+
+unlike(
+   $resume_runs[2],
+   qr/\QNo events processed.\E/,
+   "New run detects new events"
+);
+
+like(
+   $resume_runs[3],
+   qr/\QNo events processed.\E/,
+   "And running again after that finds nothing new"
+);
+
+resume_offset_ok($resume_file, $filename, "The resume file has the updated offset");
+
+unlink($resume_file);
+
+close $fh;
+
+# #############################################################################
+# Now test the interaction with --run-time-mode interval
+# #############################################################################
+
+($fh, $filename) = tempfile(UNLINK => 1);
+$fh->autoflush(1);
+
+print { $fh } slurp_file("$trunk/t/lib/samples/slowlogs/slow033.txt");
+
+my @run_args = (qw(--run-time-mode interval --run-time 1d --iterations 0),
+                qw(--report-format query_report));
+my @resume_args = (@run_args, '--resume', $resume_file);
+
+my @run_time;
+push @run_time, run_pqd(@resume_args) for 1,2;
+
+resume_offset_ok($resume_file, $filename, "The resume file has the correct offset when using --run-time-mode interval");
+
+print { $fh } slurp_file("$samples/slow002.txt");
+
+push @run_time, run_pqd(@resume_args) for 1,2;
+
+resume_offset_ok($resume_file, $filename, "...and it updates correctly");
+
+like(
+   $_,
+   qr/\QNo events processed.\E/,
+   "Runs 2 & 4 find no new data"
+) for @run_time[1, 3];
+
+# This shows up in the first report, but shouldn't show up in the
+# third run, after we add new events to the file.
+my $re = qr/\QSELECT * FROM foo\E/;
+
+unlike(
+   $run_time[2],
+   $re,
+   "Events from the first run are correctly ignored"
+);
+
+my $no_resume = run_pqd(@run_args);
+
+like(
+   $no_resume,
+   $re,
+   "...but do show up if run without resume"
+);
+
+# #############################################################################
+# Done.
+# #############################################################################
+$sb->wipe_clean($dbh);
+ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ .
" broke the sandbox"); +done_testing; +exit; From d1bdd4d80c811f95dee6867f7e3453ab1e0cc2fa Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Thu, 24 Jan 2013 12:00:48 -0300 Subject: [PATCH 28/34] Make pqd --output json have flags to enable sorted keys and prettyfied JSON --- bin/pt-query-digest | 18 +- lib/JSONReportFormatter.pm | 18 +- t/pt-query-digest/output.t | 4 + .../samples/output_json_slow002.txt | 203 +++++++++- .../samples/output_json_tcpdump021.txt | 359 +++++++++++++++++- 5 files changed, 596 insertions(+), 6 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 84366c4b..bc3f206c 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -7977,7 +7977,7 @@ sub _d { { package JSONReportFormatter; use Mo; -use JSON; +use JSON (); use List::Util qw(sum); @@ -7985,8 +7985,22 @@ use Transformers qw(make_checksum parse_timestamp); use constant PTDEBUG => $ENV{PTDEBUG} || 0; +our $pretty_json = undef; +our $sorted_json = undef; + extends qw(QueryReportFormatter); +has _json => ( + is => 'ro', + init_arg => undef, + builder => '_build_json', + handles => { encode_json => 'encode' }, +); + +sub _build_json { + return JSON->new->utf8->pretty($pretty_json)->canonical($sorted_json); +} + override [qw(rusage date hostname files header profile prepared)] => sub { return; }; @@ -8062,7 +8076,7 @@ override query_report => sub { }; } - return encode_json(\@queries) . "\n"; + return $self->encode_json(\@queries) . "\n"; }; 1; diff --git a/lib/JSONReportFormatter.pm b/lib/JSONReportFormatter.pm index 331e08de..27118a6b 100644 --- a/lib/JSONReportFormatter.pm +++ b/lib/JSONReportFormatter.pm @@ -1,7 +1,7 @@ { package JSONReportFormatter; use Mo; -use JSON; +use JSON (); use List::Util qw(sum); @@ -9,8 +9,22 @@ use Transformers qw(make_checksum parse_timestamp); use constant PTDEBUG => $ENV{PTDEBUG} || 0; +our $pretty_json = undef; +our $sorted_json = undef; + extends qw(QueryReportFormatter); +has _json => ( + is => 'ro', + init_arg => undef, + builder => '_build_json', + handles => { encode_json => 'encode' }, +); + +sub _build_json { + return JSON->new->utf8->pretty($pretty_json)->canonical($sorted_json); +} + override [qw(rusage date hostname files header profile prepared)] => sub { return; }; @@ -89,7 +103,7 @@ override query_report => sub { }; } - return encode_json(\@queries) . "\n"; + return $self->encode_json(\@queries) . "\n"; }; 1; diff --git a/t/pt-query-digest/output.t b/t/pt-query-digest/output.t index 9af5b9a8..f61bb572 100644 --- a/t/pt-query-digest/output.t +++ b/t/pt-query-digest/output.t @@ -14,6 +14,10 @@ use Test::More; use PerconaTest; require "$trunk/bin/pt-query-digest"; +no warnings 'once'; +local $JSONReportFormatter::sorted_json = 1; +local $JSONReportFormatter::pretty_json = 1; + my @args = qw(--output json); my $sample = "$trunk/t/lib/samples"; my $results = "t/pt-query-digest/samples"; diff --git a/t/pt-query-digest/samples/output_json_slow002.txt b/t/pt-query-digest/samples/output_json_slow002.txt index c2798ea4..f8604c65 100644 --- a/t/pt-query-digest/samples/output_json_slow002.txt +++ b/t/pt-query-digest/samples/output_json_slow002.txt @@ -1,2 +1,203 @@ -[{"class":{"checksum":"66825DDC008FFA89","ts_min":"2007-12-18 11:48:27","ts_max":"2007-12-18 11:48:27","fingerprint":"update d?tuningdetail_?_? n inner join d?gonzo a using(gonzo) set n.column? = a.column?, n.word? 
= a.word?","sample":"update db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) \n set n.column1 = a.column1, n.word3 = a.word3","cnt":1},"attributes":{"bytes":{"pct":"0.12","avg":"129.000000","min":"129.000000","max":"129.000000","median":"129.000000","cnt":"1.000000","stddev":0,"pct_95":"129.000000","sum":"129.000000"},"db":{"pct":0.142857142857143,"avg":0,"min":"db1","max":"db1","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Full_join":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"Merge_passes":{"pct":"0.12","avg":0,"min":"0","max":"0","median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Filesort":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"user":{"pct":0.125,"avg":0,"min":"[SQL_SLAVE]","max":"[SQL_SLAVE]","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Rows_sent":{"pct":"0.12","avg":0,"min":"0","max":"0","median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Lock_time":{"pct":"0.12","avg":"0.000091","min":"0.000091","max":"0.000091","median":"0.000091","cnt":"1.000000","stddev":0,"pct_95":"0.000091","sum":"0.000091"},"Full_scan":{"pct":0.125,"avg":1,"min":1,"max":1,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":1},"Filesort_on_disk":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"host":{"pct":0.125,"avg":0,"min":"","max":"","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"pos_in_log":{"pct":"0.12","avg":"338.000000","min":"338.000000","max":"338.000000","median":"338.000000","cnt":"1.000000","stddev":0,"pct_95":"338.000000","sum":"338.000000"},"Tmp_table":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"QC_Hit":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"Tmp_table_on_disk":{"pct":0.125,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"Rows_examined":{"pct":"0.12","avg":"62951.000000","min":"62951.000000","max":"62951.000000","median":"62951.000000","cnt":"1.000000","stddev":0,"pct_95":"62951.000000","sum":"62951.000000"},"Query_time":{"pct":"0.12","avg":"0.726052","min":"0.726052","max":"0.726052","median":"0.726052","cnt":"1.000000","stddev":0,"pct_95":"0.726052","sum":"0.726052"}}}] +[ + { + "attributes" : { + "Filesort" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Filesort_on_disk" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Full_join" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Full_scan" : { + "avg" : 1, + "cnt" : 1, + "max" : 1, + "median" : 0, + "min" : 1, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 1 + }, + "Lock_time" : { + "avg" : "0.000091", + "cnt" : "1.000000", + "max" : "0.000091", + "median" : "0.000091", + "min" : "0.000091", + "pct" : "0.12", + "pct_95" : "0.000091", + "stddev" : 0, + "sum" : "0.000091" + }, + "Merge_passes" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : "0", + "median" : 0, + "min" : "0", + "pct" : "0.12", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "QC_Hit" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Query_time" : { + "avg" : "0.726052", + "cnt" : "1.000000", + "max" : 
"0.726052", + "median" : "0.726052", + "min" : "0.726052", + "pct" : "0.12", + "pct_95" : "0.726052", + "stddev" : 0, + "sum" : "0.726052" + }, + "Rows_examined" : { + "avg" : "62951.000000", + "cnt" : "1.000000", + "max" : "62951.000000", + "median" : "62951.000000", + "min" : "62951.000000", + "pct" : "0.12", + "pct_95" : "62951.000000", + "stddev" : 0, + "sum" : "62951.000000" + }, + "Rows_sent" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : "0", + "median" : 0, + "min" : "0", + "pct" : "0.12", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Tmp_table" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Tmp_table_on_disk" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "bytes" : { + "avg" : "129.000000", + "cnt" : "1.000000", + "max" : "129.000000", + "median" : "129.000000", + "min" : "129.000000", + "pct" : "0.12", + "pct_95" : "129.000000", + "stddev" : 0, + "sum" : "129.000000" + }, + "db" : { + "avg" : 0, + "cnt" : 1, + "max" : "db1", + "median" : 0, + "min" : "db1", + "pct" : 0.142857142857143, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "host" : { + "avg" : 0, + "cnt" : 1, + "max" : "", + "median" : 0, + "min" : "", + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "pos_in_log" : { + "avg" : "338.000000", + "cnt" : "1.000000", + "max" : "338.000000", + "median" : "338.000000", + "min" : "338.000000", + "pct" : "0.12", + "pct_95" : "338.000000", + "stddev" : 0, + "sum" : "338.000000" + }, + "user" : { + "avg" : 0, + "cnt" : 1, + "max" : "[SQL_SLAVE]", + "median" : 0, + "min" : "[SQL_SLAVE]", + "pct" : 0.125, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + } + }, + "class" : { + "checksum" : "66825DDC008FFA89", + "cnt" : 1, + "fingerprint" : "update d?tuningdetail_?_? n inner join d?gonzo a using(gonzo) set n.column? = a.column?, n.word? 
= a.word?", + "sample" : "update db2.tuningdetail_21_265507 n\n inner join db1.gonzo a using(gonzo) \n set n.column1 = a.column1, n.word3 = a.word3", + "ts_max" : "2007-12-18 11:48:27", + "ts_min" : "2007-12-18 11:48:27" + } + } +] + diff --git a/t/pt-query-digest/samples/output_json_tcpdump021.txt b/t/pt-query-digest/samples/output_json_tcpdump021.txt index d9252715..1f5b8826 100644 --- a/t/pt-query-digest/samples/output_json_tcpdump021.txt +++ b/t/pt-query-digest/samples/output_json_tcpdump021.txt @@ -1,2 +1,359 @@ -[{"class":{"checksum":"AA8E9FA785927259","ts_min":"2009-12-08 09:23:49.637394","ts_max":"2009-12-08 09:23:49.637394","fingerprint":"prepare select i from d.t where i=?","sample":"PREPARE SELECT i FROM d.t WHERE i=?","cnt":1},"attributes":{"bytes":{"pct":"0.33","avg":"35.000000","min":"35.000000","max":"35.000000","median":"35.000000","cnt":"1.000000","stddev":0,"pct_95":"35.000000","sum":"35.000000"},"No_good_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"No_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"host":{"pct":0.333333333333333,"avg":0,"min":"127.0.0.1","max":"127.0.0.1","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"pos_in_log":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Rows_affected":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Statement_id":{"pct":0.5,"avg":0,"min":2,"max":2,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Error_no":{"pct":0.333333333333333,"avg":0,"min":"none","max":"none","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Query_time":{"pct":"0.33","avg":"0.000286","min":"0.000286","max":"0.000286","median":"0.000286","cnt":"1.000000","stddev":0,"pct_95":"0.000286","sum":"0.000286"},"Warning_count":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0}}},{"class":{"checksum":"3F79759E7FA2F117","ts_min":"2009-12-08 09:23:49.637892","ts_max":"2009-12-08 09:23:49.637892","fingerprint":"execute select i from d.t where i=?","sample":"EXECUTE SELECT i FROM d.t WHERE 
i=\"3\"","cnt":1},"attributes":{"bytes":{"pct":"0.33","avg":"37.000000","min":"37.000000","max":"37.000000","median":"37.000000","cnt":"1.000000","stddev":0,"pct_95":"37.000000","sum":"37.000000"},"No_good_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"No_index_used":{"pct":0.333333333333333,"avg":1,"min":1,"max":1,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":1},"host":{"pct":0.333333333333333,"avg":0,"min":"127.0.0.1","max":"127.0.0.1","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"pos_in_log":{"pct":"0.33","avg":"1106.000000","min":"1106.000000","max":"1106.000000","median":"1106.000000","cnt":"1.000000","stddev":0,"pct_95":"1106.000000","sum":"1106.000000"},"Rows_affected":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Statement_id":{"pct":0.5,"avg":0,"min":"2","max":"2","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Error_no":{"pct":0.333333333333333,"avg":0,"min":"none","max":"none","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Query_time":{"pct":"0.33","avg":"0.000281","min":"0.000281","max":"0.000281","median":"0.000281","cnt":"1.000000","stddev":0,"pct_95":"0.000281","sum":"0.000281"},"Warning_count":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0}}},{"class":{"checksum":"AA353644DE4C4CB4","ts_min":"2009-12-08 09:23:49.638381","ts_max":"2009-12-08 09:23:49.638381","fingerprint":"administrator command: Quit","sample":"administrator command: Quit","cnt":1},"attributes":{"bytes":{"pct":"0.33","avg":"27.000000","min":"27.000000","max":"27.000000","median":"27.000000","cnt":"1.000000","stddev":0,"pct_95":"27.000000","sum":"27.000000"},"No_good_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"No_index_used":{"pct":0.333333333333333,"avg":0,"min":0,"max":0,"median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":0},"host":{"pct":0.333333333333333,"avg":0,"min":"127.0.0.1","max":"127.0.0.1","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"pos_in_log":{"pct":"0.33","avg":"1850.000000","min":"1850.000000","max":"1850.000000","median":"1850.000000","cnt":"1.000000","stddev":0,"pct_95":"1850.000000","sum":"1850.000000"},"Rows_affected":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0},"Error_no":{"pct":0.333333333333333,"avg":0,"min":"none","max":"none","median":0,"cnt":1,"stddev":0,"pct_95":0,"sum":null},"Query_time":{"pct":"0.33","avg":0,"min":"0.000000","max":"0.000000","median":"0.000000","cnt":"1.000000","stddev":0,"pct_95":"0.000000","sum":0},"Warning_count":{"pct":"0.33","avg":0,"min":0,"max":0,"median":0,"cnt":"1.000000","stddev":0,"pct_95":0,"sum":0}}}] +[ + { + "attributes" : { + "Error_no" : { + "avg" : 0, + "cnt" : 1, + "max" : "none", + "median" : 0, + "min" : "none", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "No_good_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "No_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Query_time" : { + "avg" : "0.000286", + "cnt" : "1.000000", + "max" : "0.000286", + "median" : "0.000286", + "min" : "0.000286", + "pct" : "0.33", + "pct_95" : "0.000286", + "stddev" : 0, + "sum" : "0.000286" + 
}, + "Rows_affected" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Statement_id" : { + "avg" : 0, + "cnt" : 1, + "max" : 2, + "median" : 0, + "min" : 2, + "pct" : 0.5, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "Warning_count" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "bytes" : { + "avg" : "35.000000", + "cnt" : "1.000000", + "max" : "35.000000", + "median" : "35.000000", + "min" : "35.000000", + "pct" : "0.33", + "pct_95" : "35.000000", + "stddev" : 0, + "sum" : "35.000000" + }, + "host" : { + "avg" : 0, + "cnt" : 1, + "max" : "127.0.0.1", + "median" : 0, + "min" : "127.0.0.1", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "pos_in_log" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + } + }, + "class" : { + "checksum" : "AA8E9FA785927259", + "cnt" : 1, + "fingerprint" : "prepare select i from d.t where i=?", + "sample" : "PREPARE SELECT i FROM d.t WHERE i=?", + "ts_max" : "2009-12-08 09:23:49.637394", + "ts_min" : "2009-12-08 09:23:49.637394" + } + }, + { + "attributes" : { + "Error_no" : { + "avg" : 0, + "cnt" : 1, + "max" : "none", + "median" : 0, + "min" : "none", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "No_good_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "No_index_used" : { + "avg" : 1, + "cnt" : 1, + "max" : 1, + "median" : 0, + "min" : 1, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 1 + }, + "Query_time" : { + "avg" : "0.000281", + "cnt" : "1.000000", + "max" : "0.000281", + "median" : "0.000281", + "min" : "0.000281", + "pct" : "0.33", + "pct_95" : "0.000281", + "stddev" : 0, + "sum" : "0.000281" + }, + "Rows_affected" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Statement_id" : { + "avg" : 0, + "cnt" : 1, + "max" : "2", + "median" : 0, + "min" : "2", + "pct" : 0.5, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "Warning_count" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "bytes" : { + "avg" : "37.000000", + "cnt" : "1.000000", + "max" : "37.000000", + "median" : "37.000000", + "min" : "37.000000", + "pct" : "0.33", + "pct_95" : "37.000000", + "stddev" : 0, + "sum" : "37.000000" + }, + "host" : { + "avg" : 0, + "cnt" : 1, + "max" : "127.0.0.1", + "median" : 0, + "min" : "127.0.0.1", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "pos_in_log" : { + "avg" : "1106.000000", + "cnt" : "1.000000", + "max" : "1106.000000", + "median" : "1106.000000", + "min" : "1106.000000", + "pct" : "0.33", + "pct_95" : "1106.000000", + "stddev" : 0, + "sum" : "1106.000000" + } + }, + "class" : { + "checksum" : "3F79759E7FA2F117", + "cnt" : 1, + "fingerprint" : "execute select i from d.t where i=?", + "sample" : "EXECUTE SELECT i FROM d.t WHERE i=\"3\"", + "ts_max" : "2009-12-08 09:23:49.637892", + "ts_min" : "2009-12-08 09:23:49.637892" + } + }, + { + "attributes" : { + "Error_no" : { + "avg" : 0, + "cnt" : 1, + "max" 
: "none", + "median" : 0, + "min" : "none", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "No_good_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "No_index_used" : { + "avg" : 0, + "cnt" : 1, + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Query_time" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : "0.000000", + "median" : "0.000000", + "min" : "0.000000", + "pct" : "0.33", + "pct_95" : "0.000000", + "stddev" : 0, + "sum" : 0 + }, + "Rows_affected" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "Warning_count" : { + "avg" : 0, + "cnt" : "1.000000", + "max" : 0, + "median" : 0, + "min" : 0, + "pct" : "0.33", + "pct_95" : 0, + "stddev" : 0, + "sum" : 0 + }, + "bytes" : { + "avg" : "27.000000", + "cnt" : "1.000000", + "max" : "27.000000", + "median" : "27.000000", + "min" : "27.000000", + "pct" : "0.33", + "pct_95" : "27.000000", + "stddev" : 0, + "sum" : "27.000000" + }, + "host" : { + "avg" : 0, + "cnt" : 1, + "max" : "127.0.0.1", + "median" : 0, + "min" : "127.0.0.1", + "pct" : 0.333333333333333, + "pct_95" : 0, + "stddev" : 0, + "sum" : null + }, + "pos_in_log" : { + "avg" : "1850.000000", + "cnt" : "1.000000", + "max" : "1850.000000", + "median" : "1850.000000", + "min" : "1850.000000", + "pct" : "0.33", + "pct_95" : "1850.000000", + "stddev" : 0, + "sum" : "1850.000000" + } + }, + "class" : { + "checksum" : "AA353644DE4C4CB4", + "cnt" : 1, + "fingerprint" : "administrator command: Quit", + "sample" : "administrator command: Quit", + "ts_max" : "2009-12-08 09:23:49.638381", + "ts_min" : "2009-12-08 09:23:49.638381" + } + } +] + From 457d61edfe132322e2e206d7f2b05b04f27e0301 Mon Sep 17 00:00:00 2001 From: "Brian Fraser fraserb@gmail.com" <> Date: Thu, 24 Jan 2013 12:11:11 -0300 Subject: [PATCH 29/34] Fix a broken test and make another more resilient to future changes --- bin/pt-query-digest | 2 +- t/pt-query-digest/option_sanity.t | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index bc3f206c..c38332f7 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -13593,7 +13593,7 @@ sub main { for my $tables ('review-table', 'history-table') { my $got = $o->get($tables); if ( grep !defined, Quoter->split_unquote($got) ) { - $o->save_error("$tables should be passed a " + $o->save_error("--$tables should be passed a " . "fully-qualified table name, got $got"); } } diff --git a/t/pt-query-digest/option_sanity.t b/t/pt-query-digest/option_sanity.t index b418ff68..1e177a62 100644 --- a/t/pt-query-digest/option_sanity.t +++ b/t/pt-query-digest/option_sanity.t @@ -16,11 +16,15 @@ use PerconaTest; my $cmd = "$trunk/bin/pt-query-digest"; my $help = qx{$cmd --help}; +my $output; + # ############################################################################# # Test cmd line op sanity. 
# ############################################################################# -my $output = `$cmd --review h=127.1,P=12345,u=msandbox,p=msandbox --review-table test`; -like($output, qr/--review-table requires a fully/, 'Dies if no database part in --review-table'); +for my $opt (qw(review-table history-table)) { + $output = `$cmd --review h=127.1,P=12345,u=msandbox,p=msandbox --$opt test`; + like($output, qr/--$opt should be passed a/, "Dies if no database part in --$opt"); +} $output = `$cmd --review h=127.1,P=12345,u=msandbox,p=msandbox,D=test,t=test`; like($output, qr/--review does not accept a t option/, 'Dies if t part in --review DSN'); @@ -100,8 +104,8 @@ like( $help, qr/\Q--report-format=A\E\s* \QPrint these sections of the query analysis\E\s* - \Qreport (default rusage,date,hostname,files,\E\s* - \Qheader,profile,query_report,prepared)\E/x, + \Qreport (default rusage\E,\s*date,\s*hostname,\s*files,\s* + header,\s*profile,\s*query_report,\s*prepared\)/x, "Bug 831525: pt-query-digest help output mangled" ); From 541db4994e9fe416c997fb422cc699420b5fdd09 Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Wed, 30 Jan 2013 17:17:02 -0300 Subject: [PATCH 30/34] Updated explain samples for 5.0 --- t/pt-query-digest/samples/issue_1196-output-5.0.txt | 10 ++++------ t/pt-query-digest/samples/slow007_explain_1.txt | 4 +--- t/pt-query-digest/samples/slow007_explain_2.txt | 4 +--- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/t/pt-query-digest/samples/issue_1196-output-5.0.txt b/t/pt-query-digest/samples/issue_1196-output-5.0.txt index 17eaefa0..8d0edc99 100644 --- a/t/pt-query-digest/samples/issue_1196-output-5.0.txt +++ b/t/pt-query-digest/samples/issue_1196-output-5.0.txt @@ -1,14 +1,12 @@ # Profile -# Rank Query ID Response time Calls R/Call Apdx V/M EXPLAIN Item -# ==== ================== ============= ===== ====== ==== ===== ======= ======== -# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 1.00 0.00 TF>aa SELECT t +# Rank Query ID Response time Calls R/Call V/M Item +# ==== ================== ============= ===== ====== ===== ======== +# 1 0xD4B6A5CD2F2F485C 0.2148 100.0% 1 0.2148 0.00 SELECT t # Query 1: 0 QPS, 0x concurrency, ID 0xD4B6A5CD2F2F485C at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: TF>aa -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2010-12-14 16:12:28 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_1.txt b/t/pt-query-digest/samples/slow007_explain_1.txt index 8bdfeceb..8c2ce5a0 100644 --- a/t/pt-query-digest/samples/slow007_explain_1.txt +++ b/t/pt-query-digest/samples/slow007_explain_1.txt @@ -1,9 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. 
-# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: s -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= diff --git a/t/pt-query-digest/samples/slow007_explain_2.txt b/t/pt-query-digest/samples/slow007_explain_2.txt index 17ab592f..3cfac091 100644 --- a/t/pt-query-digest/samples/slow007_explain_2.txt +++ b/t/pt-query-digest/samples/slow007_explain_2.txt @@ -1,9 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: I -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= From 3b6303f42158bc07dbcaf20c23169419b2bc39cd Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Wed, 30 Jan 2013 17:21:26 -0300 Subject: [PATCH 31/34] Updated explain samples for 5.1 --- t/pt-query-digest/samples/slow007_explain_1-51.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/t/pt-query-digest/samples/slow007_explain_1-51.txt b/t/pt-query-digest/samples/slow007_explain_1-51.txt index 9d37d8c1..2e9c9c61 100644 --- a/t/pt-query-digest/samples/slow007_explain_1-51.txt +++ b/t/pt-query-digest/samples/slow007_explain_1-51.txt @@ -1,9 +1,7 @@ # Query 1: 0 QPS, 0x concurrency, ID 0x8E306CDB7A800841 at byte 0 ________ # This item is included in the report because it matches --limit. -# Scores: Apdex = 1.00 [1.0]*, V/M = 0.00 -# EXPLAIN sparkline: s -# Query_time sparkline: | ^ | +# Scores: V/M = 0.00 # Time range: all events occurred at 2007-12-18 11:48:27 # Attribute pct total min max avg 95% stddev median # ============ === ======= ======= ======= ======= ======= ======= ======= From dbbb6998310c934c73d7eaf7ffb1fe39bf72b85d Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Wed, 30 Jan 2013 17:35:40 -0300 Subject: [PATCH 32/34] Update docs for pqd, make --create-review-tables work by default --- bin/pt-query-digest | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index c38332f7..15006241 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -15284,7 +15284,8 @@ Analyze, aggregate, and report on a slow query log: Review a slow log, saving results to the test.query_review table in a MySQL server running on host1. See L<"--review"> for more on reviewing queries: - pt-query-digest --review h=host1,D=test,t=query_review /path/to/slow.log + pt-query-digest --review h=host1 --review-table test.query_review + --history-table test.query_history /path/to/slow.log Print the structure of events so you can construct a complex L<"--filter">: @@ -15314,7 +15315,7 @@ is safe to run even on production systems, but you might want to monitor it until you are satisfied that the input you give it does not cause undue load. Various options will cause pt-query-digest to insert data into tables, execute -SQL queries, and so on. These include the L<"--review"> options. +SQL queries, and so on. These include the L<"--review"> option. At the time of this release, we know of no bugs that could cause serious harm to users. @@ -15795,7 +15796,9 @@ Continue parsing even if there is an error. 
 The tool will not continue forever: it stops once any process causes 100
 errors, in which case there is probably a bug in the tool or the input is
 invalid.

-=item --create-review-tables
+=item --[no]create-review-tables
+
+default: yes

 Create the L<"--review"> tables if they do not exist.

@@ -16305,10 +16308,15 @@ type: DSN

 Store a sample of each class of query in this DSN, plus historical values
 for review trend analysis

-The argument specifies a table to store all unique query fingerprints in. The
-table must have at least the following columns. You can add more columns for
-your own special purposes, but they won't be used by pt-query-digest. The
-following CREATE TABLE definition is also used for L<"--create-review-table">.
+The argument specifies a host to store all unique query fingerprints in; the
+databases and tables where this data is stored can be specified with the
+L<"--review-table"> and L<"--history-table"> options.
+By default, if the table doesn't exist, the tool tries creating it; this
+behavior can be controlled with the L<"--[no]create-review-tables"> option.
+If the table was created manually, it must have at least the following columns.
+You can add more columns for your own special purposes, but they won't be used
+by pt-query-digest. The following CREATE TABLE definition is also used by
+L<"--[no]create-review-tables">.

 =for comment ignore-pt-internal-value
 MAGIC_create_review:
@@ -16346,7 +16354,7 @@ fingerprint.
 This option depends on C<--group-by fingerprint> (which is the default).
 It will not work otherwise.

-Additionally, pt-query-digest will save information into a review table,
+Additionally, pt-query-digest will save historical information into a review table,
 so you can see how classes of queries have changed over time. You can
 change the destination table with the L<"--history-table">

@@ -16379,7 +16387,7 @@ you could also just add a ts_min column and make it a DATE type, so you'd
 get one row per class of queries per day.

 The default table structure follows. The following table definition is used
-for L<"--create-review-tables">:
+for L<"--[no]create-review-tables">:

 =for comment ignore-pt-internal-value
 MAGIC_create_review_history

From 98f490492cccb453e5a36586fc51e68fdd8a1780 Mon Sep 17 00:00:00 2001
From: Brian Fraser
Date: Wed, 30 Jan 2013 17:47:41 -0300
Subject: [PATCH 33/34] Change JSONReportFormatter to handle versions of JSON.pm that don't add a newline at the end of the encoded json

---
 bin/pt-query-digest        | 4 +++-
 lib/JSONReportFormatter.pm | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/bin/pt-query-digest b/bin/pt-query-digest
index 15006241..136703a0 100755
--- a/bin/pt-query-digest
+++ b/bin/pt-query-digest
@@ -8076,7 +8076,9 @@ override query_report => sub {
       };
    }

-   return $self->encode_json(\@queries) . "\n";
+   my $json = $self->encode_json(\@queries);
+   $json .= "\n" if $json !~ /\n\Z/;
+   return $json . "\n";
 };

 1;
diff --git a/lib/JSONReportFormatter.pm b/lib/JSONReportFormatter.pm
index 27118a6b..764232a2 100644
--- a/lib/JSONReportFormatter.pm
+++ b/lib/JSONReportFormatter.pm
@@ -103,7 +103,9 @@ override query_report => sub {
       };
    }

-   return $self->encode_json(\@queries) . "\n";
+   my $json = $self->encode_json(\@queries);
+   $json .= "\n" if $json !~ /\n\Z/;
+   return $json .
"\n"; }; 1; From 5fd8042e3536a78ff3e341179a48c03e5a380253 Mon Sep 17 00:00:00 2001 From: Brian Fraser Date: Wed, 30 Jan 2013 17:56:19 -0300 Subject: [PATCH 34/34] Updated JSONReportFormatter to drop the hard dependency on JSON.xs and use Transformers::encode_json if JSON is not available --- bin/pt-query-digest | 22 +++++++++++++++++----- lib/JSONReportFormatter.pm | 22 +++++++++++++++++----- 2 files changed, 34 insertions(+), 10 deletions(-) diff --git a/bin/pt-query-digest b/bin/pt-query-digest index 136703a0..8d485231 100755 --- a/bin/pt-query-digest +++ b/bin/pt-query-digest @@ -7977,14 +7977,14 @@ sub _d { { package JSONReportFormatter; use Mo; -use JSON (); - -use List::Util qw(sum); +use List::Util qw(sum); use Transformers qw(make_checksum parse_timestamp); use constant PTDEBUG => $ENV{PTDEBUG} || 0; +my $have_json = eval { require JSON }; + our $pretty_json = undef; our $sorted_json = undef; @@ -7994,11 +7994,23 @@ has _json => ( is => 'ro', init_arg => undef, builder => '_build_json', - handles => { encode_json => 'encode' }, ); sub _build_json { - return JSON->new->utf8->pretty($pretty_json)->canonical($sorted_json); + return unless $have_json; + return JSON->new->utf8 + ->pretty($pretty_json) + ->canonical($sorted_json); +} + +sub encode_json { + my ($self, $encode) = @_; + if ( my $json = $self->_json ) { + return $json->encode($encode); + } + else { + return Transformers::encode_json($encode); + } } override [qw(rusage date hostname files header profile prepared)] => sub { diff --git a/lib/JSONReportFormatter.pm b/lib/JSONReportFormatter.pm index 764232a2..be0e534f 100644 --- a/lib/JSONReportFormatter.pm +++ b/lib/JSONReportFormatter.pm @@ -1,14 +1,14 @@ { package JSONReportFormatter; use Mo; -use JSON (); - -use List::Util qw(sum); +use List::Util qw(sum); use Transformers qw(make_checksum parse_timestamp); use constant PTDEBUG => $ENV{PTDEBUG} || 0; +my $have_json = eval { require JSON }; + our $pretty_json = undef; our $sorted_json = undef; @@ -18,11 +18,23 @@ has _json => ( is => 'ro', init_arg => undef, builder => '_build_json', - handles => { encode_json => 'encode' }, ); sub _build_json { - return JSON->new->utf8->pretty($pretty_json)->canonical($sorted_json); + return unless $have_json; + return JSON->new->utf8 + ->pretty($pretty_json) + ->canonical($sorted_json); +} + +sub encode_json { + my ($self, $encode) = @_; + if ( my $json = $self->_json ) { + return $json->encode($encode); + } + else { + return Transformers::encode_json($encode); + } } override [qw(rusage date hostname files header profile prepared)] => sub {