Merge remote-tracking branch 'origin/pr/263' into PMM-1429_test_metrics

Kamil Dziedzic
2017-09-14 22:07:20 +02:00
10 changed files with 180 additions and 20 deletions

View File

@@ -47,7 +47,7 @@ BEGIN {
 {
 package Percona::Toolkit;
-our $VERSION = '3.0.4';
+our $VERSION = '3.0.5';
 use strict;
 use warnings FATAL => 'all';
@@ -3297,6 +3297,17 @@ sub new {
    foreach my $arg ( qw(MasterSlave) ) {
       die "I need a $arg argument" unless $args{$arg};
    }
+   my $kill_busy_commands = {};
+   if ($args{kill_busy_commands}) {
+      for my $command (split /,/,$args{kill_busy_commands}) {
+         $command =~ s/^\s+|\s+$//g;
+         $kill_busy_commands->{$command} = 1;
+      }
+   } else {
+      $kill_busy_commands->{Query} = 1;
+   }
+   $args{kill_busy_commands} = $kill_busy_commands;
    my $self = {
       %args,
       polls => 0,
@@ -3523,7 +3534,7 @@ sub find {
          next QUERY;
       }
-      if ( $find_spec{busy_time} && ($query->{Command} || '') eq 'Query' ) {
+      if ( $find_spec{busy_time} && exists($self->{kill_busy_commands}->{$query->{Command} || ''}) ) {
         next QUERY unless defined($query->{Time});
         if ( $query->{Time} < $find_spec{busy_time} ) {
            PTDEBUG && _d("Query isn't running long enough");
@@ -6788,7 +6799,7 @@ sub main {
      DSNParser => $dp,
      Quoter    => "Quoter",
   );
-   my $pl = new Processlist(MasterSlave => $ms);
+   my $pl = new Processlist(MasterSlave => $ms, kill_busy_commands => $o->get('kill-busy-commands'));
   my $qr = new QueryRewriter();
   my $cxn;
@@ -8198,6 +8209,21 @@ that pt-kill matched and killed a query.
 See also L<"--wait-before-kill"> and L<"--wait-after-kill">.
 
+=item --kill-busy-commands
+
+default: Query
+group: Actions
+
+Comma-separated list of commands that will be watched and killed if they run
+for more than L<"--busy-time"> seconds. Default: C<Query>
+
+By default, L<"--busy-time"> kills only C<Query> commands, but in some cases
+L<"--busy-time"> needs to watch and kill other commands as well. For example,
+a prepared statement is executed as an C<Execute> command instead of C<Query>,
+so specifying C<--kill-busy-commands=Query,Execute> will also kill the
+prepared statement execution.
+
 =item --kill-query
 
 group: Actions
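
As a rough illustration (not part of this commit, using made-up values), the sketch below shows how a C<--kill-busy-commands> value is meant to behave: the comma-separated list becomes a lookup hash, so a processlist entry whose C<Command> is C<Execute> can match L<"--busy-time"> just like a plain C<Query>:

  use strict;
  use warnings;

  my $opt = 'Query,Execute';           # hypothetical --kill-busy-commands value

  my %kill_busy_commands;
  for my $command ( split /,/, $opt ) {
     $command =~ s/^\s+|\s+$//g;       # trim surrounding whitespace
     $kill_busy_commands{$command} = 1;
  }

  # Hypothetical processlist row and --busy-time threshold.
  my $query     = { Command => 'Execute', Time => 10 };
  my $busy_time = 2;

  if (   exists $kill_busy_commands{ $query->{Command} || '' }
      && defined $query->{Time}
      && $query->{Time} >= $busy_time ) {
     print "would kill: $query->{Command} running for $query->{Time}s\n";
  }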

View File

@@ -3251,6 +3251,17 @@ sub new {
    foreach my $arg ( qw(MasterSlave) ) {
       die "I need a $arg argument" unless $args{$arg};
    }
+   my $kill_busy_commands = {};
+   if ($args{kill_busy_commands}) {
+      for my $command (split /,/,$args{kill_busy_commands}) {
+         $command =~ s/^\s+|\s+$//g;
+         $kill_busy_commands->{$command} = 1;
+      }
+   } else {
+      $kill_busy_commands->{Query} = 1;
+   }
+   $args{kill_busy_commands} = $kill_busy_commands;
    my $self = {
       %args,
       polls => 0,
@@ -3465,6 +3476,7 @@ sub find {
    my $ms = $self->{MasterSlave};
    my @matches;
+   $self->{_reasons_for_matching} = undef;
    QUERY:
    foreach my $query ( @$proclist ) {
       PTDEBUG && _d('Checking query', Dumper($query));
@@ -3476,7 +3488,7 @@ sub find {
          next QUERY;
       }
-      if ( $find_spec{busy_time} && ($query->{Command} || '') eq 'Query' ) {
+      if ( $find_spec{busy_time} && exists($self->{kill_busy_commands}->{$query->{Command} || ''}) ) {
         next QUERY unless defined($query->{Time});
         if ( $query->{Time} < $find_spec{busy_time} ) {
            PTDEBUG && _d("Query isn't running long enough");

View File

@@ -69,6 +69,19 @@ sub new {
    foreach my $arg ( qw(MasterSlave) ) {
       die "I need a $arg argument" unless $args{$arg};
    }
+   # Convert the list of kill commands (Query, Execute, etc) to a hashref for
+   # faster check later
+   my $kill_busy_commands = {};
+   if ($args{kill_busy_commands}) {
+      for my $command (split /,/,$args{kill_busy_commands}) {
+         $command =~ s/^\s+|\s+$//g;
+         $kill_busy_commands->{$command} = 1;
+      }
+   } else {
+      $kill_busy_commands->{Query} = 1;
+   }
+   $args{kill_busy_commands} = $kill_busy_commands;
    my $self = {
       %args,
       polls => 0,
@@ -471,6 +484,7 @@ sub find {
    my $ms = $self->{MasterSlave};
    my @matches;
+   $self->{_reasons_for_matching} = undef;
    QUERY:
    foreach my $query ( @$proclist ) {
       PTDEBUG && _d('Checking query', Dumper($query));
@@ -484,7 +498,8 @@ sub find {
       }
       # Match special busy_time.
-      if ( $find_spec{busy_time} && ($query->{Command} || '') eq 'Query' ) {
+      #if ( $find_spec{busy_time} && ($query->{Command} || '') eq 'Query' ) {
+      if ( $find_spec{busy_time} && exists($self->{kill_busy_commands}->{$query->{Command} || ''}) ) {
         next QUERY unless defined($query->{Time});
         if ( $query->{Time} < $find_spec{busy_time} ) {
            PTDEBUG && _d("Query isn't running long enough");

View File

@@ -7,11 +7,13 @@ import (
     "gopkg.in/mgo.v2/bson"
 )
 
+// docsExamined is renamed from nscannedObjects in 3.2.0
 type SystemProfile struct {
     AllUsers        []interface{} `bson:"allUsers"`
     Client          string        `bson:"client"`
     CursorExhausted bool          `bson:"cursorExhausted"`
     DocsExamined    int           `bson:"docsExamined"`
+    NscannedObjects int           `bson:"nscannedObjects"`
     ExecStats       struct {
         Advanced                    int `bson:"advanced"`
         ExecutionTimeMillisEstimate int `bson:"executionTimeMillisEstimate"`

View File

@@ -90,7 +90,12 @@ func (s *Stats) Add(doc proto.SystemProfile) error {
         s.setQueryInfoAndCounters(key, qiac)
     }
     qiac.Count++
-    qiac.NScanned = append(qiac.NScanned, float64(doc.DocsExamined))
+    // docsExamined is renamed from nscannedObjects in 3.2.0.
+    if doc.NscannedObjects > 0 {
+        qiac.NScanned = append(qiac.NScanned, float64(doc.NscannedObjects))
+    } else {
+        qiac.NScanned = append(qiac.NScanned, float64(doc.DocsExamined))
+    }
     qiac.NReturned = append(qiac.NReturned, float64(doc.Nreturned))
     qiac.QueryTime = append(qiac.QueryTime, float64(doc.Millis))
     qiac.ResponseLength = append(qiac.ResponseLength, float64(doc.ResponseLength))
@@ -269,7 +274,7 @@ func countersToStats(query QueryInfoAndCounters, uptime int64, tc totalCounters)
         queryStats.QueryTime.Pct = queryStats.QueryTime.Total * 100 / tc.QueryTime
     }
     if tc.Bytes > 0 {
-        queryStats.ResponseLength.Pct = queryStats.ResponseLength.Total / tc.Bytes
+        queryStats.ResponseLength.Pct = queryStats.ResponseLength.Total * 100 / tc.Bytes
     }
     if queryStats.Returned.Total > 0 {
         queryStats.Ratio = queryStats.Scanned.Total / queryStats.Returned.Total
@@ -281,6 +286,7 @@ func aggregateCounters(queries []QueryInfoAndCounters) QueryInfoAndCounters {
     qt := QueryInfoAndCounters{}
     for _, query := range queries {
+        qt.Count += query.Count
         qt.NScanned = append(qt.NScanned, query.NScanned...)
         qt.NReturned = append(qt.NReturned, query.NReturned...)
         qt.QueryTime = append(qt.QueryTime, query.QueryTime...)

View File

@@ -165,7 +165,8 @@ func main() {
     if opts.Limit > 0 && len(sortedQueryStats) > opts.Limit {
         sortedQueryStats = sortedQueryStats[:opts.Limit]
     }
-    for _, qs := range sortedQueryStats {
+    for i, qs := range sortedQueryStats {
+        qs.Rank = i + 1
         t.Execute(os.Stdout, qs)
     }
@@ -426,15 +427,11 @@ func sortQueries(queries []stats.QueryStats, orderby []string) []stats.QueryStat
     case "ratio":
         f = func(c1, c2 *stats.QueryStats) bool {
-            ratio1 := c1.Scanned.Max / c1.Returned.Max
-            ratio2 := c2.Scanned.Max / c2.Returned.Max
-            return ratio1 < ratio2
+            return c1.Ratio < c2.Ratio
         }
     case "-ratio":
         f = func(c1, c2 *stats.QueryStats) bool {
-            ratio1 := c1.Scanned.Max / c1.Returned.Max
-            ratio2 := c2.Scanned.Max / c2.Returned.Max
-            return ratio1 > ratio2
+            return c1.Ratio > c2.Ratio
         }
     //

View File

@@ -22,7 +22,7 @@
         "Median": 0
       },
       "ResponseLength": {
-        "Pct": 0.9995949739087844,
+        "Pct": 99.95949739087844,
         "Total": 1061230,
         "Min": 1061230,
         "Max": 1061230,
@@ -75,7 +75,7 @@
         "Median": 7
       },
       "ResponseLength": {
-        "Pct": 0.00020251304560782172,
+        "Pct": 0.020251304560782172,
         "Total": 215,
         "Min": 215,
         "Max": 215,
@@ -128,7 +128,7 @@
         "Median": 0
       },
       "ResponseLength": {
-        "Pct": 0.00020251304560782172,
+        "Pct": 0.020251304560782172,
         "Total": 215,
         "Min": 215,
         "Max": 215,

View File

@@ -6,8 +6,8 @@
       "Fingerprint": "",
       "FirstSeen": "0001-01-01T00:00:00Z",
       "LastSeen": "0001-01-01T00:00:00Z",
-      "Count": 0,
-      "QPS": 0,
+      "Count": 3,
+      "QPS": 3,
       "Rank": 0,
       "Ratio": 134.33333333333334,
       "QueryTime": {
@@ -21,7 +21,7 @@
         "Median": 0
       },
       "ResponseLength": {
-        "Pct": 1,
+        "Pct": 100,
         "Total": 1061660,
         "Min": 215,
         "Max": 1061230,

t/pt-kill/pt_167.t (new file, 98 lines added)
View File

@@ -0,0 +1,98 @@
#!/usr/bin/env perl
BEGIN {
die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};
use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Time::HiRes qw(sleep);
use Test::More;
use threads;
use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-kill";
use Data::Dumper;
$Data::Dumper::Indent = 1;
$Data::Dumper::Sortkeys = 1;
$Data::Dumper::Quotekeys = 0;
my $o = new OptionParser(description => 'Diskstats',
file => "$trunk/bin/pt-kill",
);
$o->get_specs("$trunk/bin/pt-table-checksum");
$o->get_opts();
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('master');
if ( !$dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
}
$sb->load_file('master', "t/pt-kill/samples/pt_167.sql");
my $ps = $dbh->prepare('INSERT INTO test.foo VALUES (?)');
for (my $i=0; $i < 20000; $i++) {
$ps->execute($i);
}
sub start_thread {
my ($dp, $o, $dsn, $sleep_time) = @_;
diag("Thread started");
my $cxn = new Cxn(DSNParser => $dp, OptionParser => $o, dsn => $dsn);
$cxn->connect();
my $dbh = $cxn->dbh();
my $sth = $dbh->prepare('SELECT COUNT(*) FROM (SELECT a.id AS a_id, b.id AS b_id FROM foo AS a, foo AS b) AS c');
# Since this query is going to be killed, wrap the execution in an eval to prevent
# displaying the error message.
eval {
$sth->execute();
};
}
my $dsn = "h=127.1,P=12345,u=msandbox,p=msandbox,D=test;mysql_server_prepare=1";
my $thr = threads->create('start_thread', $dp, $o, $dp->parse($dsn), 1);
$thr->detach();
threads->yield();
sleep(1);
my $rows = $dbh->selectall_hashref('show processlist', 'id');
my $pid = 0; # reuse, reset
map { $pid = $_->{id} }
grep { $_->{info} && $_->{info} =~ m/SELECT COUNT\(\*\) FROM \(SELECT a.id/ }
values %$rows;
ok(
$pid,
"Got proc id of sleeping query: $pid"
);
$dsn = $sb->dsn_for('master');
my $output = output(
sub { pt_kill::main($dsn, "--kill-busy-commands","Query,Execute", qw(--run-time 3s --kill --busy-time 2s --print --match-info), "^(select|SELECT)"), },
stderr => 1,
);
like(
$output,
qr/KILL $pid \(Execute/,
'--kill-query'
) or diag($output);
# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;
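
The test above relies on C<mysql_server_prepare=1> so that the long-running SELECT shows up in the processlist as an C<Execute> command rather than C<Query>. A minimal sketch of that behaviour (not part of this commit; it assumes the same sandbox the test uses, 127.0.0.1:12345 with the msandbox credentials):

  use strict;
  use warnings;
  use DBI;

  # Assumed sandbox from the test above: 127.0.0.1:12345, msandbox/msandbox.
  my $dbh = DBI->connect(
     'DBI:mysql:database=test;host=127.0.0.1;port=12345;mysql_server_prepare=1',
     'msandbox', 'msandbox', { RaiseError => 1 },
  );

  # With server-side prepares the running statement is reported by
  # SHOW PROCESSLIST with Command = 'Execute', which is what
  # --kill-busy-commands=Query,Execute is meant to catch.
  my $sth = $dbh->prepare('SELECT SLEEP(?)');
  $sth->execute(10);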

View File

@@ -0,0 +1,4 @@
DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
create table test.foo (id integer);