Mirror of https://github.com/percona/percona-toolkit.git
Synced 2025-09-07 04:49:48 +00:00

Remove trailing spaces (#665)

* Remove trailing spaces
* PR-665 - Remove trailing spaces
  - Updated unstable test t/pt-online-schema-change/preserve_triggers.t
  - Updated utilities in bin directory
* PR-665 - Remove trailing spaces
  - Fixed typos
* PR-665 - Remove trailing spaces
  - Fixed typos
---------
Co-authored-by: Sveta Smirnova <sveta.smirnova@percona.com>
@@ -108,7 +108,7 @@ sub parse_event {
 PTDEBUG && _d('Comment line');
 next LINE;
 }
-
+
 if ( $line =~ m/^DELIMITER/m ) {
 my ( $del ) = $line =~ m/^DELIMITER (\S*)$/m;
 if ( $del ) {
@@ -116,7 +116,7 @@ sub set_src {
 }
 elsif ( lc $src eq 'right' ) {
 $self->{src_db_tbl} = $self->{right_db_tbl};
-$self->{dst_db_tbl} = $self->{left_db_tbl};
+$self->{dst_db_tbl} = $self->{left_db_tbl};
 }
 else {
 die "src argument must be either 'left' or 'right'"
@@ -388,7 +388,7 @@ sub make_REPLACE {
 # A SQL statement
 sub make_row {
 my ( $self, $verb, $row, $cols ) = @_;
-my @cols;
+my @cols;
 if ( my $dbh = $self->{fetch_back} ) {
 my $where = $self->make_where_clause($row, $cols);
 my $sql = $self->make_fetch_back_query($where);
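The make_row() context above only shows the fetch-back branch, where the row is re-read from another server before the statement is built. As a rough illustration of what "make a row statement from a row hashref and a column list" generally looks like, here is a minimal sketch; the helper name, backtick quoting, and use of DBI's quote() are assumptions, not the module's actual implementation.

# Sketch: build "$verb INTO db.tbl (cols) VALUES (vals)" from a row hashref.
sub make_row_sketch {
   my ( $dbh, $verb, $db_tbl, $row, $cols ) = @_;
   my @quoted_cols = map { "`$_`" } @$cols;
   my @quoted_vals = map {
      defined $row->{$_} ? $dbh->quote($row->{$_}) : 'NULL'
   } @$cols;
   return "$verb INTO $db_tbl ("
        . join(', ', @quoted_cols)
        . ') VALUES ('
        . join(', ', @quoted_vals) . ')';
}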
@@ -474,7 +474,7 @@ sub get_changes {
 sub sort_cols {
 my ( $self, $row ) = @_;
 my @cols;
-if ( $self->{tbl_struct} ) {
+if ( $self->{tbl_struct} ) {
 my $pos = $self->{tbl_struct}->{col_posn};
 my @not_in_tbl;
 @cols = sort {
@@ -96,7 +96,7 @@ sub before_execute {
 if $EVAL_ERROR;

 # Save the tmp tbl; it's used later in _compare_checksums().
-$event->{tmp_tbl} = $tmp_tbl;
+$event->{tmp_tbl} = $tmp_tbl;

 # Wrap the original query so when it's executed its results get
 # put in tmp table.
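The comment above describes redirecting a query's result set into a temporary table so it can be checksummed later instead of being compared row by row. A minimal sketch of that general pattern follows; the table name and the CHECKSUM TABLE step are illustrative assumptions, not the module's exact SQL.

# Sketch: materialize the event's query into a temporary table, then
# checksum the table rather than diffing the raw result set.
my $tmp_tbl = 'mk_upgrade_tmp';                              # assumed name
$dbh->do("DROP TEMPORARY TABLE IF EXISTS $tmp_tbl");
$dbh->do("CREATE TEMPORARY TABLE $tmp_tbl AS $event->{arg}");
my (undef, $crc) = $dbh->selectrow_array("CHECKSUM TABLE $tmp_tbl");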
@@ -297,7 +297,7 @@ sub _checksum_results {
 };
 die "Failed to checksum table: $EVAL_ERROR"
 if $EVAL_ERROR;
-
+
 $sql = "DROP TABLE IF EXISTS $tmp_tbl";
 PTDEBUG && _d($sql);
 eval {
@@ -331,7 +331,7 @@ sub _compare_rows {
 my $different_column_values = 0;

 my $n_events = scalar @$events;
-my $event0 = $events->[0];
+my $event0 = $events->[0];
 my $item = $event0->{fingerprint} || $event0->{arg};
 my $sampleno = $event0->{sampleno} || 0;
 my $dbh = $hosts->[0]->{dbh}; # doesn't matter which one
@@ -402,7 +402,7 @@ sub _compare_rows {
 sth => $left,
 row => $lr,
 Outfile => $outfile,
-);
+);
 return;
 };
@@ -472,8 +472,8 @@ sub _compare_rows {
 );

 # Save differences.
-if ( scalar @diff_rows ) {
-$different_column_values++;
+if ( scalar @diff_rows ) {
+$different_column_values++;
 $self->{diffs}->{col_vals}->{$item}->{$sampleno} = \@diff_rows;
 $self->{samples}->{$item}->{$sampleno} = $event0->{arg};
 }
@@ -116,7 +116,7 @@ sub before_execute {
 # * dbh scalar: active dbh
 # Returns: hashref
 # Can die: yes
-# execute() executes the event's query if it hasn't already been executed.
+# execute() executes the event's query if it hasn't already been executed.
 # Any prep work should have been done in before_execute(). Adds Query_time
 # attrib to the event.
 sub execute {
14 lib/Cxn.pm
@@ -55,7 +55,7 @@ use constant {
 # Optional Arguments:
 # dbh - Pre-created, uninitialized dbh
 # set - Callback to set vars on dbh when dbh is first connected
-#
+#
 # Returns:
 # Cxn object
 sub new {
@@ -173,7 +173,7 @@ sub set_dbh {
 # created the dbh probably didn't set what we set here. For example,
 # MasterSlave makes dbhs when finding slaves, but it doesn't set
 # anything.
-# Due to https://github.com/perl5-dbi/DBD-mysql/issues/306 we assign
+# Due to https://github.com/perl5-dbi/DBD-mysql/issues/306 we assign
 # connection_id to $self->{dbh_set} and compare it with current connection_id.
 # This is required to set variable values again after disconnect.
 if ( $self->{dbh} && $self->{dbh} == $dbh && $self->{dbh_set} && $self->{dbh_set} == $connection_id) {
@@ -238,8 +238,8 @@ sub description {
 return sprintf("%s -> %s:%s", $self->name(), $self->{dsn}->{h} || 'localhost' , $self->{dsn}->{P} || 'socket');
 }

-# This returns the server_id.
-# For cluster nodes, since server_id is unreliable, we use a combination of
+# This returns the server_id.
+# For cluster nodes, since server_id is unreliable, we use a combination of
 # variables to create an id string that is unique.
 sub get_id {
 my ($self, $cxn) = @_;
@@ -251,7 +251,7 @@ sub get_id {
 my $sql = q{SHOW STATUS LIKE 'wsrep\_local\_index'};
 my (undef, $wsrep_local_index) = $cxn->dbh->selectrow_array($sql);
 PTDEBUG && _d("Got cluster wsrep_local_index: ",$wsrep_local_index);
-$unique_id = $wsrep_local_index."|";
+$unique_id = $wsrep_local_index."|";
 foreach my $val ('server\_id', 'wsrep\_sst\_receive\_address', 'wsrep\_node\_name', 'wsrep\_node\_address') {
 my $sql = "SHOW VARIABLES LIKE '$val'";
 PTDEBUG && _d($cxn->name, $sql);
@@ -285,7 +285,7 @@ sub is_cluster_node {
 PTDEBUG && _d($sql); #don't invoke name() if it's not a Cxn!
 }
 else {
-$dbh = $cxn->dbh();
+$dbh = $cxn->dbh();
 PTDEBUG && _d($cxn->name, $sql);
 }

@@ -348,7 +348,7 @@ sub DESTROY {
 return;
 }

-# We have to create a wrapper around $dbh->ping() here due to
+# We have to create a wrapper around $dbh->ping() here due to
 # https://github.com/perl5-dbi/DBD-mysql/issues/306
 sub _ping() {
 my ( $self, $dbh ) = @_;
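The get_id() context in lib/Cxn.pm above shows why a plain server_id is not enough on a cluster node: wsrep_local_index plus a handful of wsrep variables are concatenated into a unique id string. The sketch below shows that idea in isolation; the variable list and the "|" separator come from the context, while the exact concatenation format is an assumption.

# Sketch: build a node id from wsrep_local_index plus several variables.
my (undef, $wsrep_local_index)
   = $dbh->selectrow_array(q{SHOW STATUS LIKE 'wsrep\_local\_index'});
my $unique_id = ($wsrep_local_index // '') . "|";
for my $val ( 'server\_id', 'wsrep\_sst\_receive\_address',
              'wsrep\_node\_name', 'wsrep\_node\_address' ) {
   my (undef, $value) = $dbh->selectrow_array("SHOW VARIABLES LIKE '$val'");
   $unique_id .= "|" . ($value // '');   # assumed layout of the id string
}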
@@ -147,7 +147,7 @@ sub parse {
 foreach my $key ( keys %$opts ) {
 PTDEBUG && _d('Finding value for', $key);
 $final_props{$key} = $given_props{$key};
-if ( !defined $final_props{$key}
+if ( !defined $final_props{$key}
 && defined $prev->{$key} && $opts->{$key}->{copy} )
 {
 $final_props{$key} = $prev->{$key};
@@ -303,7 +303,7 @@ sub get_dbh {
 my $dbh;
 my $tries = 2;
 while ( !$dbh && $tries-- ) {
-PTDEBUG && _d($cxn_string, ' ', $user, ' ', $pass,
+PTDEBUG && _d($cxn_string, ' ', $user, ' ', $pass,
 join(', ', map { "$_=>$defaults->{$_}" } keys %$defaults ));

 $dbh = eval { DBI->connect($cxn_string, $user, $pass, $defaults) };
@@ -515,7 +515,7 @@ sub set_vars {
 }
 }

-return;
+return;
 }

 sub _d {
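The get_dbh() context above retries the connection once: $tries starts at 2 and the eval around DBI->connect() keeps a failed attempt from being fatal until the retries run out. A stripped-down sketch of the same retry-with-eval pattern follows; the sleep back-off and the error message are assumptions, and $EVAL_ERROR requires the English module as elsewhere in these files.

# Sketch: try DBI->connect() up to two times, dying only after the last try.
my $dbh;
my $tries = 2;
while ( !$dbh && $tries-- ) {
   $dbh = eval { DBI->connect($cxn_string, $user, $pass, $defaults) };
   if ( !$dbh && $EVAL_ERROR ) {
      die "Cannot connect: $EVAL_ERROR" unless $tries;  # out of retries
      sleep 1;                                          # assumed back-off
   }
}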
@@ -84,8 +84,8 @@ sub run {
 $parent_exit->($child_pid) if $parent_exit;
 exit 0;
 }
-
-# I'm the child.
+
+# I'm the child.
 POSIX::setsid() or die "Cannot start a new session: $OS_ERROR";
 chdir '/' or die "Cannot chdir to /: $OS_ERROR";

@@ -123,7 +123,7 @@ sub run {
 # best that we just explicitly close all fds before reopening them.
 close STDERR;
 open STDERR, ">&STDOUT"
-or die "Cannot dupe STDERR to STDOUT: $OS_ERROR";
+or die "Cannot dupe STDERR to STDOUT: $OS_ERROR";
 }
 else {
 if ( -t STDOUT ) {
@@ -167,7 +167,7 @@ sub _make_pid_file {
 eval {
 sysopen(PID_FH, $pid_file, O_RDWR|O_CREAT|O_EXCL) or die $OS_ERROR;
 print PID_FH $PID, "\n";
-close PID_FH;
+close PID_FH;
 };
 if ( my $e = $EVAL_ERROR ) {
 if ( $e =~ m/file exists/i ) {
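The run() and _make_pid_file() context above follows the classic Unix daemonization sequence: fork and exit the parent, setsid(), chdir '/', redirect the standard handles, and create a PID file with O_EXCL so a second instance fails fast. A condensed sketch of those steps is below; the log and PID file paths are made-up placeholders, not values used by the module.

# Sketch of the daemonization steps shown in the hunks above.
use POSIX qw(setsid);
use Fcntl qw(O_RDWR O_CREAT O_EXCL);

my $pid = fork();
die "Cannot fork: $!" unless defined $pid;
exit 0 if $pid;                                    # parent exits

setsid()  or die "Cannot start a new session: $!";
chdir '/' or die "Cannot chdir to /: $!";

open STDIN,  '<', '/dev/null';
open STDOUT, '>>', '/tmp/daemon.log' or die $!;    # assumed log file
open STDERR, '>&STDOUT' or die "Cannot dupe STDERR to STDOUT: $!";

sysopen my $pid_fh, '/tmp/daemon.pid', O_RDWR|O_CREAT|O_EXCL
   or die "PID file exists or cannot be created: $!";
print {$pid_fh} "$$\n";
close $pid_fh;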
@@ -623,7 +623,7 @@ sub parse_from {
|
||||
}
|
||||
else {
|
||||
my $filename = $args{filename} || $self->filename();
|
||||
|
||||
|
||||
open my $fh, "<", $filename
|
||||
or die "Cannot parse $filename: $OS_ERROR";
|
||||
$lines_read = $self->_parse_from_filehandle(
|
||||
@@ -642,7 +642,7 @@ sub parse_from {
|
||||
# run of the mill filehandle.
|
||||
#
|
||||
# Parameters:
|
||||
# filehandle -
|
||||
# filehandle -
|
||||
# sample_callback - Called each time a sample is processed, passed
|
||||
# the latest timestamp.
|
||||
#
|
||||
@@ -879,7 +879,7 @@ sub _print_device_if {
|
||||
$self->_mark_if_active($dev);
|
||||
return $dev if $dev =~ $dev_re;
|
||||
}
|
||||
else {
|
||||
else {
|
||||
if ( $self->active_device($dev) ) {
|
||||
# If --show-interactive is enabled, or we've seen
|
||||
# the device be active at least once.
|
||||
|
@@ -19,7 +19,7 @@
|
||||
# ###########################################################################
|
||||
{
|
||||
# Package: DiskstatsGroupByAll
|
||||
#
|
||||
#
|
||||
|
||||
package DiskstatsGroupByAll;
|
||||
|
||||
|
@@ -19,7 +19,7 @@
|
||||
# ###########################################################################
|
||||
{
|
||||
# Package: DiskstatsGroupByDisk
|
||||
#
|
||||
#
|
||||
|
||||
package DiskstatsGroupByDisk;
|
||||
|
||||
|
@@ -112,18 +112,18 @@ sub run_interactive {
|
||||
# fork(), but future-proofing it in case we ever need to speak to
|
||||
# the child
|
||||
$child_pid = open $child_fh, "-|";
|
||||
|
||||
|
||||
die "Cannot fork: $OS_ERROR" unless defined $child_pid;
|
||||
|
||||
|
||||
if ( !$child_pid ) {
|
||||
# Child
|
||||
STDOUT->autoflush(1);
|
||||
# Bit of helpful magic: Changes how the program's name is displayed,
|
||||
# so it's easier to track in things like ps.
|
||||
local $PROGRAM_NAME = "$PROGRAM_NAME (data-gathering daemon)";
|
||||
|
||||
|
||||
close $tmp_fh if $tmp_fh;
|
||||
|
||||
|
||||
PTDEBUG && _d("Child is [$PROGRAM_NAME] in ps aux and similar");
|
||||
|
||||
gather_samples(
|
||||
@@ -317,7 +317,7 @@ sub gather_samples {
|
||||
|
||||
my @to_print = timestamp();
|
||||
push @to_print, <$diskstats_fh>;
|
||||
|
||||
|
||||
for my $fh ( @fhs ) {
|
||||
print { $fh } @to_print;
|
||||
}
|
||||
@@ -417,13 +417,13 @@ sub group_by {
|
||||
else {
|
||||
$obj->set_interactive(0);
|
||||
}
|
||||
|
||||
|
||||
my $print_header;
|
||||
my $header_callback = $args{header_callback} || sub {
|
||||
my ($self, @args) = @_;
|
||||
$self->print_header(@args) unless $print_header++
|
||||
};
|
||||
|
||||
|
||||
$obj->group_by(
|
||||
filehandle => $args{filehandle},
|
||||
# Only print the header once, as if in interactive.
|
||||
@@ -501,7 +501,7 @@ sub get_new_value_for {
|
||||
my (%args) = @_;
|
||||
my $o = $args{OptionParser};
|
||||
my $new_interval = get_blocking_input($message) || 0;
|
||||
|
||||
|
||||
die "Invalid timeout: $new_interval"
|
||||
unless looks_like_number($new_interval)
|
||||
&& ($new_interval = int($new_interval));
|
||||
@@ -523,7 +523,7 @@ sub get_new_regex_for {
|
||||
my (%args) = @_;
|
||||
my $o = $args{OptionParser};
|
||||
my $new_regex = get_blocking_input($message);
|
||||
|
||||
|
||||
local $EVAL_ERROR;
|
||||
if ( $new_regex && (my $re = eval { qr/$new_regex/i }) ) {
|
||||
$o->get("current_group_by_obj")
|
||||
|
@@ -93,7 +93,7 @@ sub get_duplicate_keys {
|
||||
if ( $args{ignore_order} || $is_fulltext ) {
|
||||
my $ordered_cols = join(',', sort(split(/,/, $key->{colnames})));
|
||||
PTDEBUG && _d('Reordered', $key->{name}, 'cols from',
|
||||
$key->{colnames}, 'to', $ordered_cols);
|
||||
$key->{colnames}, 'to', $ordered_cols);
|
||||
$key->{colnames} = $ordered_cols;
|
||||
}
|
||||
|
||||
@@ -107,7 +107,7 @@ sub get_duplicate_keys {
|
||||
# $push_to = \@hash_keys if $is_hash;
|
||||
# $push_to = \@spatial_keys if $is_spatial;
|
||||
}
|
||||
push @$push_to, $key;
|
||||
push @$push_to, $key;
|
||||
}
|
||||
|
||||
# Redundantly constrained unique keys are treated as normal keys.
|
||||
@@ -246,7 +246,7 @@ sub get_duplicate_fks {
|
||||
# Optional args are:
|
||||
# * exact_duplicates Keys are dupes only if they're exact duplicates
|
||||
# * callback Sub called for each dupe found
|
||||
#
|
||||
#
|
||||
# For a full technical explanation of how/why this sub works, read:
|
||||
# http://code.google.com/p/maatkit/wiki/DeterminingDuplicateKeys
|
||||
sub remove_prefix_duplicates {
|
||||
@@ -282,7 +282,7 @@ sub remove_prefix_duplicates {
|
||||
@$left_keys = reverse sort { lc($a->{colnames}) cmp lc($b->{colnames}) }
|
||||
grep { defined $_; }
|
||||
@$left_keys;
|
||||
|
||||
|
||||
# Last left key is its second-to-last key.
|
||||
# The very last left key will be used as a right key.
|
||||
$last_left_key = scalar(@$left_keys) - 2;
|
||||
@@ -339,7 +339,7 @@ sub remove_prefix_duplicates {
|
||||
if ( my $type = $right_keys->[$right_index]->{unconstrained} ) {
|
||||
$reason .= "Uniqueness of $right_name ignored because "
|
||||
. $right_keys->[$right_index]->{constraining_key}->{name}
|
||||
. " is a $type constraint\n";
|
||||
. " is a $type constraint\n";
|
||||
}
|
||||
my $exact_dupe = $right_len_cols < $left_len_cols ? 0 : 1;
|
||||
$reason .= $right_name
|
||||
|
@@ -68,7 +68,7 @@ my @buck_vals = map { bucket_value($_); } (0..NUM_BUCK-1);
 # will be created using the event's "apple" value or,
 # if that attrib doesn't exist, its "orange" value.
 # If this option isn't specified, then all attributes are auto-detected and aggregated.
-# ignore_attributes - Arrayref of auto-detected attributes to ignore.
+# ignore_attributes - Arrayref of auto-detected attributes to ignore.
 # This does not apply to the attributes specified
 # with the optional attributes option above.
 # unroll_limit - If this many events have been processed and some
@@ -457,13 +457,13 @@ sub make_handler {
|
||||
# "unrolled" subroutine.
|
||||
my @unrolled = (
|
||||
# Get $val from primary attrib name.
|
||||
"\$val = \$event->{'$attrib'};",
|
||||
|
||||
"\$val = \$event->{'$attrib'};",
|
||||
|
||||
# Get $val from alternate attrib names.
|
||||
( map { "\$val = \$event->{'$_'} unless defined \$val;" }
|
||||
grep { $_ ne $attrib } @{$args{alternates}}
|
||||
),
|
||||
|
||||
|
||||
# Execute the code lines, if $val is defined.
|
||||
'defined $val && do {',
|
||||
@lines,
|
||||
@@ -498,7 +498,7 @@ sub make_handler {
 # Sub: bucket_idx
 # Return the bucket number for the given value. Buck numbers are zero-indexed,
 # so although there are 1,000 buckets (NUM_BUCK), 999 is the greatest idx.
-#
+#
 # Notice that this sub is not a class method, so either call it
 # from inside this module like bucket_idx() or outside this module
 # like EventAggregator::bucket_idx().
@@ -558,7 +558,7 @@ sub bucket_value {
 # of 1,000 buckets, the value of each represents its index in an 8 bucket
 # base 10 array. For example: base 10 bucket 0 represents vals (0, 0.000010),
 # and base 1.05 buckets 0..47 represent vals (0, 0.000010401). So the first
-# 48 elements of the returned array will have 0 as their values.
+# 48 elements of the returned array will have 0 as their values.
 # TODO: right now it's hardcoded to buckets of 10, in the future maybe not.
 {
 my @buck_tens;
@@ -566,7 +566,7 @@ sub bucket_value {
 return @buck_tens if @buck_tens;

 # To make a more precise map, we first set the starting values for
-# each of the 8 base 10 buckets.
+# each of the 8 base 10 buckets.
 my $start_bucket = 0;
 my @base10_starts = (0);
 map { push @base10_starts, (10**$_)*MIN_BUCK } (1..7);
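The comments above describe EventAggregator's log-scale histogram: NUM_BUCK buckets growing by a factor of 1.05 from MIN_BUCK, later rolled up into 8 base-10 ranges for reporting. The sketch below only mirrors that documented math so the idx/value relationship is concrete; the constant values and both sub bodies are assumptions inferred from the comments (for example, 1.05^47 * 0.000001 ≈ 0.0000104, matching the "0.000010401" boundary mentioned), not the module's exact code.

# Sketch of the documented bucket math: bucket_value(bucket_idx(x)) is
# within about 5% of x for values above MIN_BUCK.
use constant {
   NUM_BUCK  => 1000,      # assumed to match the module's constants
   MIN_BUCK  => .000001,
   BUCK_SIZE => 1.05,
};

sub bucket_idx_sketch {
   my ($val) = @_;
   return 0 if $val <= MIN_BUCK;
   my $idx = int( log($val / MIN_BUCK) / log(BUCK_SIZE) ) + 1;
   return $idx > (NUM_BUCK - 1) ? (NUM_BUCK - 1) : $idx;
}

sub bucket_value_sketch {
   my ($idx) = @_;
   return 0 if $idx == 0;
   return MIN_BUCK * (BUCK_SIZE ** ($idx - 1));
}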
@@ -714,7 +714,7 @@ sub _calc_metrics {
|
||||
BUCKET:
|
||||
for my $bucket ( reverse 0..(NUM_BUCK-1) ) {
|
||||
my $val = $vals->[$bucket];
|
||||
next BUCKET unless $val;
|
||||
next BUCKET unless $val;
|
||||
|
||||
$total_left -= $val;
|
||||
$sum_excl += $val;
|
||||
@@ -809,7 +809,7 @@ sub top_events {
|
||||
my ($total, $count) = (0, 0);
|
||||
foreach my $groupby ( @sorted ) {
|
||||
# Events that fall into the top criterion for some reason
|
||||
if (
|
||||
if (
|
||||
(!$args{total} || $total < $args{total} )
|
||||
&& ( !$args{count} || $count < $args{count} )
|
||||
) {
|
||||
@@ -918,7 +918,7 @@ sub merge {
|
||||
die "EventAggregator objects have different worst: "
|
||||
. "$ea1->{worst} and $ea->{worst}"
|
||||
unless $ea1->{worst} eq $ea->{worst};
|
||||
|
||||
|
||||
my $attrib_types = $ea->attributes();
|
||||
map {
|
||||
$attrib_types{$_} = $attrib_types->{$_}
|
||||
|
@@ -47,7 +47,7 @@ sub new {
|
||||
}
|
||||
my $self = {
|
||||
step => 0.05, # default
|
||||
%args,
|
||||
%args,
|
||||
rate_ok => undef,
|
||||
last_check => undef,
|
||||
stats => {
|
||||
@@ -106,7 +106,7 @@ sub throttle {
|
||||
($args{stats}->{throttle_rate_max} || ()), $current_rate);
|
||||
}
|
||||
PTDEBUG && _d('Current rate:', $current_rate);
|
||||
}
|
||||
}
|
||||
|
||||
# rand() returns a fractional value between [0,1). If skip_prob is
|
||||
# 0 then, then no queries will be skipped. If its 1.0, then all queries
|
||||
|
@@ -19,8 +19,8 @@
|
||||
# ###########################################################################
|
||||
{
|
||||
# Package: FlowControlWaiter
|
||||
# FlowControlWaiter helps limit load when there's too much Flow Control pausing
|
||||
# It is based on the other "Waiter" modules:
|
||||
# FlowControlWaiter helps limit load when there's too much Flow Control pausing
|
||||
# It is based on the other "Waiter" modules:
|
||||
# ReplicaLagWaiter & MySQLStatusWaiter
|
||||
package FlowControlWaiter;
|
||||
|
||||
@@ -36,12 +36,12 @@ use Data::Dumper;
|
||||
#
|
||||
# Required Arguments:
|
||||
# oktorun - Callback that returns true if it's ok to continue running
|
||||
# node - Node dbh on which to check for wsrep_flow_control_paused_ns
|
||||
# node - Node dbh on which to check for wsrep_flow_control_paused_ns
|
||||
# sleep - Callback to sleep between checks.
|
||||
# max_pct - Max percent of flow control caused pause time to tolerate
|
||||
# max_pct - Max percent of flow control caused pause time to tolerate
|
||||
#
|
||||
# Returns:
|
||||
# FlowControlWaiter object
|
||||
# FlowControlWaiter object
|
||||
sub new {
|
||||
my ( $class, %args ) = @_;
|
||||
my @required_args = qw(oktorun node sleep max_flow_ctl);
|
||||
@@ -52,10 +52,10 @@ sub new {
|
||||
my $self = {
|
||||
%args
|
||||
};
|
||||
|
||||
|
||||
# Get current hi-res epoch seconds
|
||||
$self->{last_time} = time();
|
||||
|
||||
$self->{last_time} = time();
|
||||
|
||||
# Get nanoseconds server has been paused due to Flow Control
|
||||
my (undef, $last_fc_ns) = $self->{node}->selectrow_array('SHOW STATUS LIKE "wsrep_flow_control_paused_ns"');
|
||||
|
||||
@@ -99,19 +99,19 @@ sub wait {
 $pr->set_callback($pr_callback);
 }

-# Loop where we wait for average pausing time caused by FC to fall below --max-flow-ctl
+# Loop where we wait for average pausing time caused by FC to fall below --max-flow-ctl
 # Average pause time is calculated starting from the last iteration.
 while ( $oktorun->() && $too_much_fc ) {
 my $current_time = time();
 my (undef, $current_fc_ns) = $node->selectrow_array('SHOW STATUS LIKE "wsrep_flow_control_paused_ns"');
 my $current_fc_secs = $current_fc_ns/1000_000_000;
-my $current_avg = ($current_fc_secs - $self->{last_fc_secs}) / ($current_time - $self->{last_time});
-if ( $current_avg > $max_avg ) {
+my $current_avg = ($current_fc_secs - $self->{last_fc_secs}) / ($current_time - $self->{last_time});
+if ( $current_avg > $max_avg ) {
 if ( $pr ) {
 # There's no real progress because we can't estimate how long
 # it will take the values to abate.
 $pr->update(sub { return 0; });
 }
 }
 PTDEBUG && _d('Calling sleep callback');
 if ( $self->{simple_progress} ) {
 print STDERR "Waiting for Flow Control to abate\n";
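The loop above turns two cumulative counters into an average: wsrep_flow_control_paused_ns is total nanoseconds spent paused, so the pause fraction since the previous poll is (delta paused seconds) / (delta wall-clock seconds), compared against the configured maximum. A worked sketch of that arithmetic follows; the numbers are made up, and expressing --max-flow-ctl as a percent divided by 100 is an assumption based on the "max percent" description in the new() hunk above.

# Sketch: average Flow Control pause fraction between two polls.
my $last_time     = 1000.0;        # epoch seconds at previous poll
my $last_fc_secs  = 12.40;         # cumulative FC pause at previous poll
my $current_time  = 1010.0;        # 10 s of wall clock later
my $current_fc_ns = 12.90 * 1e9;   # counter read from SHOW STATUS

my $current_fc_secs = $current_fc_ns / 1_000_000_000;
my $current_avg     = ($current_fc_secs - $last_fc_secs)
                    / ($current_time - $last_time);      # 0.5 / 10 = 0.05
my $max_avg = 5 / 100;             # e.g. --max-flow-ctl 5 (percent), assumed
print "paused 5% of the interval\n" if $current_avg >= $max_avg;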
@@ -248,7 +248,7 @@ sub _split_url {
|
||||
or die(qq/SSL certificate not valid for $host\n/);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
$self->{host} = $host;
|
||||
$self->{port} = $port;
|
||||
|
||||
|
@@ -42,7 +42,7 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0;
|
||||
# IndexUsage object
|
||||
sub new {
|
||||
my ( $class, %args ) = @_;
|
||||
|
||||
|
||||
my $self = {
|
||||
%args,
|
||||
tables_for => {}, # Keyed off db
|
||||
@@ -110,7 +110,7 @@ sub add_query {
|
||||
}
|
||||
|
||||
# Sub: add_table_usage
|
||||
# Increase usage count for table (even if no indexes in it are used).
|
||||
# Increase usage count for table (even if no indexes in it are used).
|
||||
# If saving results, the tables table is updated, too.
|
||||
#
|
||||
# Parameters:
|
||||
|
@@ -58,14 +58,14 @@ has _json => (
|
||||
builder => '_build_json',
|
||||
);
|
||||
|
||||
has 'max_query_length' => (
|
||||
has 'max_query_length' => (
|
||||
is => 'rw',
|
||||
isa => 'Int',
|
||||
required => 0,
|
||||
default => sub { return 10_000; }, # characters, not bytes
|
||||
);
|
||||
|
||||
has 'max_fingerprint_length' => (
|
||||
has 'max_fingerprint_length' => (
|
||||
is => 'rw',
|
||||
isa => 'Int',
|
||||
required => 0,
|
||||
@@ -187,7 +187,7 @@ override query_report => sub {
|
||||
my $real_attrib = $attrib eq 'bytes' ? 'Query_length' : $attrib;
|
||||
|
||||
if ( $type eq 'num' ) {
|
||||
foreach my $m ( qw(sum min max) ) {
|
||||
foreach my $m ( qw(sum min max) ) {
|
||||
if ( $int ) {
|
||||
$global_data->{metrics}->{$real_attrib}->{$m}
|
||||
= sprintf('%d', $store->{$m} || 0);
|
||||
@@ -214,7 +214,7 @@ override query_report => sub {
|
||||
else {
|
||||
$global_data->{metrics}->{$real_attrib}->{avg}
|
||||
= sprintf('%.6f', $store->{sum} / $store->{cnt});
|
||||
}
|
||||
}
|
||||
}
|
||||
elsif ( $type eq 'bool' ) {
|
||||
my $store = $results->{globals}->{$real_attrib};
|
||||
@@ -287,7 +287,7 @@ override query_report => sub {
|
||||
else {
|
||||
my $type = $attrib eq 'Query_length' ? 'num' : $ea->type_for($attrib) || 'string';
|
||||
if ( $type eq 'string' ) {
|
||||
$metrics{$attrib} = { value => $metrics{$attrib}{max} };
|
||||
$metrics{$attrib} = { value => $metrics{$attrib}{max} };
|
||||
}
|
||||
elsif ( $type eq 'num' ) {
|
||||
# Avoid scientific notation in the metrics by forcing it to use
|
||||
@@ -311,14 +311,14 @@ override query_report => sub {
|
||||
}
|
||||
|
||||
# Add "copy-paste" info, i.e. this stuff from the regular report:
|
||||
#
|
||||
#
|
||||
# Tables
|
||||
# SHOW TABLE STATUS FROM `db2` LIKE 'tuningdetail_21_265507'\G
|
||||
# SHOW CREATE TABLE `db2`.`tuningdetail_21_265507`\G
|
||||
# SHOW TABLE STATUS FROM `db1` LIKE 'gonzo'\G
|
||||
# SHOW CREATE TABLE `db1`.`gonzo`\G
|
||||
# update db2.tuningdetail_21_265507 n
|
||||
# inner join db1.gonzo a using(gonzo)
|
||||
# inner join db1.gonzo a using(gonzo)
|
||||
# set n.column1 = a.column1, n.word3 = a.word3\G
|
||||
# Converted for EXPLAIN
|
||||
# EXPLAIN /*!50100 PARTITIONS*/
|
||||
|
10 lib/Lmo.pm
@@ -81,7 +81,7 @@ sub extends {
|
||||
|
||||
sub _load_module {
|
||||
my ($class) = @_;
|
||||
|
||||
|
||||
# Try loading the class, but don't croak if we fail.
|
||||
(my $file = $class) =~ s{::|'}{/}g;
|
||||
$file .= '.pm';
|
||||
@@ -115,7 +115,7 @@ sub has {
|
||||
my $caller = scalar caller();
|
||||
|
||||
my $class_metadata = Lmo::Meta->metadata_for($caller);
|
||||
|
||||
|
||||
for my $attribute ( ref $names ? @$names : $names ) {
|
||||
my %args = @_;
|
||||
my $method = ($args{is} || '') eq 'ro'
|
||||
@@ -135,16 +135,16 @@ sub has {
|
||||
# isa => Constraint,
|
||||
if ( my $type_check = $args{isa} ) {
|
||||
my $check_name = $type_check;
|
||||
|
||||
|
||||
if ( my ($aggregate_type, $inner_type) = $type_check =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) {
|
||||
$type_check = Lmo::Types::_nested_constraints($attribute, $aggregate_type, $inner_type);
|
||||
}
|
||||
|
||||
|
||||
my $check_sub = sub {
|
||||
my ($new_val) = @_;
|
||||
Lmo::Types::check_type_constraints($attribute, $type_check, $check_name, $new_val);
|
||||
};
|
||||
|
||||
|
||||
$class_metadata->{$attribute}{isa} = [$check_name, $check_sub];
|
||||
my $orig_method = $method;
|
||||
$method = sub {
|
||||
|
@@ -56,9 +56,9 @@ sub import {
|
||||
$INFO{$target} = { is_role => 1 };
|
||||
# get symbol table reference_unimport_coderefs
|
||||
my $stash = _stash_for $target;
|
||||
|
||||
|
||||
_install_tracked $target => has => \*Lmo::has;
|
||||
|
||||
|
||||
# install before/after/around subs
|
||||
foreach my $type (qw(before after around)) {
|
||||
_install_tracked $target => $type => sub {
|
||||
@@ -66,11 +66,11 @@ sub import {
|
||||
push @{$INFO{$target}{modifiers}||=[]}, [ $type => @_ ];
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
_install_tracked $target => requires => sub {
|
||||
push @{$INFO{$target}{requires}||=[]}, @_;
|
||||
};
|
||||
|
||||
|
||||
_install_tracked $target => with => \*Lmo::with;
|
||||
|
||||
# grab all *non-constant* (stash slot is not a scalarref) subs present
|
||||
|
@@ -187,7 +187,7 @@ sub recurse_to_slaves {
|
||||
}
|
||||
|
||||
my $dbh = $args->{dbh};
|
||||
|
||||
|
||||
DBH: {
|
||||
if ( !defined $dbh ) {
|
||||
foreach my $known_slave ( @{$args->{slaves}} ) {
|
||||
|
@@ -94,7 +94,7 @@ sub pending_changes {
|
||||
# uses sth attributes to return a pseudo table struct for the query's columns.
|
||||
sub get_result_set_struct {
|
||||
my ( $dbh, $sth ) = @_;
|
||||
my @cols = map {
|
||||
my @cols = map {
|
||||
my $name = $_;
|
||||
my $name_len = length($name);
|
||||
if ( $name_len > 64 ) {
|
||||
@@ -109,7 +109,7 @@ sub get_result_set_struct {
|
||||
my @nullable = map { $dbh->type_info($_)->{NULLABLE} == 1 ? 1 : 0 } @{$sth->{TYPE}};
|
||||
|
||||
my $struct = {
|
||||
cols => \@cols,
|
||||
cols => \@cols,
|
||||
# collation_for => {}, RowDiff::key_cmp() may need this.
|
||||
};
|
||||
|
||||
@@ -120,7 +120,7 @@ sub get_result_set_struct {
|
||||
$struct->{col_posn}->{$col} = $i;
|
||||
$struct->{type_for}->{$col} = $type;
|
||||
$struct->{is_nullable}->{$col} = $nullable[$i];
|
||||
$struct->{is_numeric}->{$col}
|
||||
$struct->{is_numeric}->{$col}
|
||||
= ($type =~ m/(?:(?:tiny|big|medium|small)?int|float|double|decimal|year)/ ? 1 : 0);
|
||||
|
||||
# We no longer specify the (precision, scale) for double, float, and
|
||||
|
@@ -122,13 +122,13 @@ sub _parse_config {
|
||||
}
|
||||
|
||||
handle_special_vars(\%config_data);
|
||||
|
||||
|
||||
return %config_data;
|
||||
}
|
||||
|
||||
sub handle_special_vars {
|
||||
my ($config_data) = @_;
|
||||
|
||||
|
||||
if ( $config_data->{vars}->{wsrep_provider_options} ) {
|
||||
my $vars = $config_data->{vars};
|
||||
my $dupes = $config_data->{duplicate_vars};
|
||||
@@ -191,7 +191,7 @@ sub _parse_config_output {
|
||||
vars => $vars,
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
return (
|
||||
format => $format,
|
||||
vars => $vars,
|
||||
@@ -265,7 +265,7 @@ sub parse_mysqld {
|
||||
|
||||
# First look for the list of option files like
|
||||
# Default options are read from the following files in the given order:
|
||||
# /etc/my.cnf /usr/local/mysql/etc/my.cnf ~/.my.cnf
|
||||
# /etc/my.cnf /usr/local/mysql/etc/my.cnf ~/.my.cnf
|
||||
my @opt_files;
|
||||
if ( $output =~ m/^Default options are read.+\n/mg ) {
|
||||
my ($opt_files) = $output =~ m/\G^(.+)\n/m;
|
||||
@@ -288,7 +288,7 @@ sub parse_mysqld {
|
||||
# It also ends with something like
|
||||
#
|
||||
# wait_timeout 28800
|
||||
#
|
||||
#
|
||||
# To see what values a running MySQL server is using, type
|
||||
# 'mysqladmin variables' instead of 'mysqld --verbose --help'.
|
||||
#
|
||||
@@ -374,7 +374,7 @@ sub _preprocess_varvals {
|
||||
}
|
||||
|
||||
my ($var, $val) = ($1, $2);
|
||||
|
||||
|
||||
# Variable names are usually specified like "log-bin"
|
||||
# but in SHOW VARIABLES they're all like "log_bin".
|
||||
$var =~ tr/-/_/;
|
||||
@@ -385,7 +385,7 @@ sub _preprocess_varvals {
|
||||
if ( !defined $val ) {
|
||||
$val = '';
|
||||
}
|
||||
|
||||
|
||||
# Strip leading and trailing whitespace.
|
||||
for my $item ($var, $val) {
|
||||
$item =~ s/^\s+//;
|
||||
@@ -407,7 +407,7 @@ sub _parse_varvals {
|
||||
# Config built from parsing the given varvals.
|
||||
my %config;
|
||||
|
||||
# Discover duplicate vars.
|
||||
# Discover duplicate vars.
|
||||
my %duplicates;
|
||||
|
||||
while ( my ($var, $vals) = each %$vars ) {
|
||||
@@ -487,7 +487,7 @@ sub _mimic_show_variables {
|
||||
die "I need a $arg argument" unless $args{$arg};
|
||||
}
|
||||
my ($vars, $format) = @args{@required_args};
|
||||
|
||||
|
||||
foreach my $var ( keys %$vars ) {
|
||||
if ( $vars->{$var} eq '' ) {
|
||||
if ( $format eq 'mysqld' ) {
|
||||
|
@@ -19,7 +19,7 @@
|
||||
# ###########################################################################
|
||||
{
|
||||
# Package: MySQLConfigComparer
|
||||
# MySQLConfigComparer compares and diffs C<MySQLConfig> objects.
|
||||
# MySQLConfigComparer compares and diffs C<MySQLConfig> objects.
|
||||
package MySQLConfigComparer;
|
||||
|
||||
use strict;
|
||||
@@ -47,7 +47,7 @@ my %alt_val_for = (
|
||||
# Optional Arguments:
|
||||
# ignore_variables - Arrayref of variables to ignore
|
||||
# numeric_variables - Arrayref of variables to compare numerically
|
||||
# optional_value_variables - Arrayref of vars whose val is optional
|
||||
# optional_value_variables - Arrayref of vars whose val is optional
|
||||
# any_value_is_true_variables - Arrayref of vars... see below
|
||||
# base_path - Hashref of variable=>base_path
|
||||
#
|
||||
@@ -71,7 +71,7 @@ sub new {
|
||||
# The vars should be compared with == instead of eq so that
|
||||
# 0 equals 0.0, etc.
|
||||
my %is_numeric = (
|
||||
long_query_time => 1,
|
||||
long_query_time => 1,
|
||||
($args{numeric_variables}
|
||||
? map { $_ => 1 } @{$args{numeric_variables}}
|
||||
: ()),
|
||||
@@ -87,7 +87,7 @@ sub new {
|
||||
($args{optional_value_variables}
|
||||
? map { $_ => 1 } @{$args{optional_value_variables}}
|
||||
: ()),
|
||||
);
|
||||
);
|
||||
|
||||
# Like value_is_optional but SHOW VARIABlES does not list a default value,
|
||||
# it only lists ON if the variable was given in a config file without or
|
||||
|
@@ -120,24 +120,24 @@ my %com_for = (
|
||||
);
|
||||
|
||||
my %flag_for = (
|
||||
'CLIENT_LONG_PASSWORD' => 1, # new more secure passwords
|
||||
'CLIENT_FOUND_ROWS' => 2, # Found instead of affected rows
|
||||
'CLIENT_LONG_FLAG' => 4, # Get all column flags
|
||||
'CLIENT_CONNECT_WITH_DB' => 8, # One can specify db on connect
|
||||
'CLIENT_NO_SCHEMA' => 16, # Don't allow database.table.column
|
||||
'CLIENT_COMPRESS' => 32, # Can use compression protocol
|
||||
'CLIENT_ODBC' => 64, # Odbc client
|
||||
'CLIENT_LOCAL_FILES' => 128, # Can use LOAD DATA LOCAL
|
||||
'CLIENT_IGNORE_SPACE' => 256, # Ignore spaces before '('
|
||||
'CLIENT_PROTOCOL_41' => 512, # New 4.1 protocol
|
||||
'CLIENT_INTERACTIVE' => 1024, # This is an interactive client
|
||||
'CLIENT_SSL' => 2048, # Switch to SSL after handshake
|
||||
'CLIENT_IGNORE_SIGPIPE' => 4096, # IGNORE sigpipes
|
||||
'CLIENT_TRANSACTIONS' => 8192, # Client knows about transactions
|
||||
'CLIENT_RESERVED' => 16384, # Old flag for 4.1 protocol
|
||||
'CLIENT_SECURE_CONNECTION' => 32768, # New 4.1 authentication
|
||||
'CLIENT_MULTI_STATEMENTS' => 65536, # Enable/disable multi-stmt support
|
||||
'CLIENT_MULTI_RESULTS' => 131072, # Enable/disable multi-results
|
||||
'CLIENT_LONG_PASSWORD' => 1, # new more secure passwords
|
||||
'CLIENT_FOUND_ROWS' => 2, # Found instead of affected rows
|
||||
'CLIENT_LONG_FLAG' => 4, # Get all column flags
|
||||
'CLIENT_CONNECT_WITH_DB' => 8, # One can specify db on connect
|
||||
'CLIENT_NO_SCHEMA' => 16, # Don't allow database.table.column
|
||||
'CLIENT_COMPRESS' => 32, # Can use compression protocol
|
||||
'CLIENT_ODBC' => 64, # Odbc client
|
||||
'CLIENT_LOCAL_FILES' => 128, # Can use LOAD DATA LOCAL
|
||||
'CLIENT_IGNORE_SPACE' => 256, # Ignore spaces before '('
|
||||
'CLIENT_PROTOCOL_41' => 512, # New 4.1 protocol
|
||||
'CLIENT_INTERACTIVE' => 1024, # This is an interactive client
|
||||
'CLIENT_SSL' => 2048, # Switch to SSL after handshake
|
||||
'CLIENT_IGNORE_SIGPIPE' => 4096, # IGNORE sigpipes
|
||||
'CLIENT_TRANSACTIONS' => 8192, # Client knows about transactions
|
||||
'CLIENT_RESERVED' => 16384, # Old flag for 4.1 protocol
|
||||
'CLIENT_SECURE_CONNECTION' => 32768, # New 4.1 authentication
|
||||
'CLIENT_MULTI_STATEMENTS' => 65536, # Enable/disable multi-stmt support
|
||||
'CLIENT_MULTI_RESULTS' => 131072, # Enable/disable multi-results
|
||||
);
|
||||
|
||||
use constant {
|
||||
@@ -369,7 +369,7 @@ sub parse_event {
|
||||
PTDEBUG && _d('Appending data to buff; expecting',
|
||||
$session->{buff_left}, 'more bytes');
|
||||
}
|
||||
else {
|
||||
else {
|
||||
# Remove the first MySQL header. A single TCP packet can contain many
|
||||
# MySQL packets, but we only look at the first. The 2nd and subsequent
|
||||
# packets are usually parts of a result set returned by the server, but
|
||||
@@ -472,7 +472,7 @@ sub _packet_from_server {
|
||||
die "I need a packet" unless $packet;
|
||||
die "I need a session" unless $session;
|
||||
|
||||
PTDEBUG && _d('Packet is from server; client state:', $session->{state});
|
||||
PTDEBUG && _d('Packet is from server; client state:', $session->{state});
|
||||
|
||||
if ( ($session->{server_seq} || '') eq $packet->{seq} ) {
|
||||
push @{ $session->{server_retransmissions} }, $packet->{seq};
|
||||
@@ -529,7 +529,7 @@ sub _packet_from_server {
|
||||
}
|
||||
}
|
||||
else {
|
||||
if ( $first_byte eq '00' ) {
|
||||
if ( $first_byte eq '00' ) {
|
||||
if ( ($session->{state} || '') eq 'client_auth' ) {
|
||||
# We logged in OK! Trigger an admin Connect command.
|
||||
|
||||
@@ -601,7 +601,7 @@ sub _packet_from_server {
|
||||
},
|
||||
$packet, $session
|
||||
);
|
||||
}
|
||||
}
|
||||
else {
|
||||
PTDEBUG && _d('Looks like an OK packet but session has no cmd');
|
||||
}
|
||||
@@ -706,7 +706,7 @@ sub _packet_from_server {
|
||||
# packet.
|
||||
my ( $warning_count, $status_flags )
|
||||
= $data =~ m/fe(.{4})(.{4})\Z/;
|
||||
if ( $warning_count ) {
|
||||
if ( $warning_count ) {
|
||||
$event->{Warnings} = to_num($warning_count);
|
||||
my $flags = to_num($status_flags); # TODO set all flags?
|
||||
$event->{No_good_index_used}
|
||||
@@ -739,7 +739,7 @@ sub _packet_from_client {
|
||||
die "I need a packet" unless $packet;
|
||||
die "I need a session" unless $session;
|
||||
|
||||
PTDEBUG && _d('Packet is from client; state:', $session->{state});
|
||||
PTDEBUG && _d('Packet is from client; state:', $session->{state});
|
||||
|
||||
if ( ($session->{client_seq} || '') eq $packet->{seq} ) {
|
||||
push @{ $session->{client_retransmissions} }, $packet->{seq};
|
||||
@@ -784,7 +784,7 @@ sub _packet_from_client {
|
||||
elsif ( ($session->{state} || '') eq 'awaiting_reply' ) {
|
||||
my $arg = $session->{cmd}->{arg} ? substr($session->{cmd}->{arg}, 0, 50)
|
||||
: 'unknown';
|
||||
PTDEBUG && _d('More data for previous command:', $arg, '...');
|
||||
PTDEBUG && _d('More data for previous command:', $arg, '...');
|
||||
return;
|
||||
}
|
||||
else {
|
||||
@@ -1071,7 +1071,7 @@ sub get_lcb {
|
||||
# 4 23 SQL state marker, always '#'
|
||||
# 6 00 00 00 00 00 SQL state
|
||||
# 16 00 ... Error message
|
||||
# The sqlstate marker and actual sqlstate are combined into one value.
|
||||
# The sqlstate marker and actual sqlstate are combined into one value.
|
||||
sub parse_error_packet {
|
||||
my ( $data ) = @_;
|
||||
return unless $data;
|
||||
@@ -1207,9 +1207,9 @@ sub parse_client_handshake_packet {
|
||||
(..) # Length-coding byte for scramble buff
|
||||
}x;
|
||||
|
||||
# This packet is easy to detect because it's the only case where
|
||||
# the server sends the client a packet first (its handshake) and
|
||||
# then the client only and ever sends back its handshake.
|
||||
# This packet is easy to detect because it's the only case where
|
||||
# the server sends the client a packet first (its handshake) and
|
||||
# then the client only and ever sends back its handshake.
|
||||
if ( !$buff_len ) {
|
||||
PTDEBUG && _d('Did not match client handshake packet');
|
||||
return;
|
||||
@@ -1217,7 +1217,7 @@ sub parse_client_handshake_packet {
|
||||
|
||||
my $code_len = hex($buff_len);
|
||||
my $db;
|
||||
|
||||
|
||||
# Only try to get the db if CLIENT_CONNECT_WITH_DB flag is set
|
||||
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse41
|
||||
my $capability_flags = to_num($flags); # $flags is stored as little endian.
|
||||
@@ -1296,12 +1296,12 @@ sub parse_execute_packet {
|
||||
my $null_count = int(($sth->{num_params} + 7) / 8) || 1;
|
||||
my $null_bitmap = to_num(substr($data, 20, $null_count * 2));
|
||||
PTDEBUG && _d('NULL bitmap:', $null_bitmap, 'count:', $null_count);
|
||||
|
||||
|
||||
# This chops off everything up to the byte for new params.
|
||||
substr($data, 0, 20 + ($null_count * 2), '');
|
||||
|
||||
my $new_params = to_num(substr($data, 0, 2, ''));
|
||||
my @types;
|
||||
my @types;
|
||||
if ( $new_params ) {
|
||||
PTDEBUG && _d('New param types');
|
||||
# It seems all params are type 254, MYSQL_TYPE_STRING. Perhaps
|
||||
|
@@ -37,7 +37,7 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0;
|
||||
# oktorun - Callback that returns true if it's ok to continue running.
|
||||
#
|
||||
# Returns:
|
||||
# MySQLStatusWaiter object
|
||||
# MySQLStatusWaiter object
|
||||
sub new {
|
||||
my ( $class, %args ) = @_;
|
||||
my @required_args = qw(max_spec get_status sleep oktorun);
|
||||
@@ -111,7 +111,7 @@ sub _parse_spec {
|
||||
}
|
||||
}
|
||||
|
||||
return \%max_val_for;
|
||||
return \%max_val_for;
|
||||
}
|
||||
|
||||
# Sub: max_values
|
||||
|
@@ -53,7 +53,7 @@ $Data::Dumper::Quotekeys = 0;
|
||||
# order_by - Add ORDER BY to nibble SQL (default no)
|
||||
#
|
||||
# Returns:
|
||||
# NibbleIterator object
|
||||
# NibbleIterator object
|
||||
sub new {
|
||||
my ( $class, %args ) = @_;
|
||||
my @required_args = qw(Cxn tbl chunk_size OptionParser Quoter TableNibbler TableParser);
|
||||
@@ -136,7 +136,7 @@ sub new {
|
||||
|
||||
sub switch_to_nibble {
|
||||
my $self = shift;
|
||||
my $params = _nibble_params($self->{nibble_params}, $self->{tbl}, $self->{args}, $self->{cols},
|
||||
my $params = _nibble_params($self->{nibble_params}, $self->{tbl}, $self->{args}, $self->{cols},
|
||||
$self->{chunk_size}, $self->{where}, $self->{comments}, $self->{Quoter});
|
||||
|
||||
$self->{one_nibble} = 0;
|
||||
@@ -176,7 +176,7 @@ sub _one_nibble {
|
||||
my $explain_nibble_sql
|
||||
= "EXPLAIN SELECT "
|
||||
. ($args->{select} ? $args->{select}
|
||||
: join(', ', map{ $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum'
|
||||
: join(', ', map{ $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum'
|
||||
? "CAST(".$q->quote($_)." AS UNSIGNED)" : $q->quote($_) } @$cols))
|
||||
. " FROM $tbl->{name}"
|
||||
. ($where ? " WHERE $where" : '')
|
||||
@@ -296,7 +296,7 @@ sub _nibble_params {
|
||||
. " /*$comments->{nibble}*/";
|
||||
PTDEBUG && _d('Nibble statement:', $nibble_sql);
|
||||
|
||||
my $explain_nibble_sql
|
||||
my $explain_nibble_sql
|
||||
= "EXPLAIN SELECT "
|
||||
. ($args->{select} ? $args->{select}
|
||||
: join(', ', map { $q->quote($_) } @{$asc->{cols}}))
|
||||
@@ -388,7 +388,7 @@ sub next {
|
||||
sleep($self->{sleep});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# If no rows, then we just got the next boundaries, which start
|
||||
# the next nibble.
|
||||
if ( !$self->{have_rows} ) {
|
||||
@@ -426,7 +426,7 @@ sub next {
|
||||
}
|
||||
$self->{rowno} = 0;
|
||||
$self->{have_rows} = 0;
|
||||
|
||||
|
||||
}
|
||||
|
||||
PTDEBUG && _d('Done nibbling');
|
||||
@@ -580,7 +580,7 @@ sub can_nibble {
|
||||
# The table can be nibbled if this point is reached, else we would have
|
||||
# died earlier. Return some values about nibbling the table.
|
||||
my $pause_file = ($o->has('pause-file') && $o->get('pause-file')) || undef;
|
||||
|
||||
|
||||
return {
|
||||
row_est => $row_est, # nibble about this many rows
|
||||
index => $index, # using this index
|
||||
@@ -632,9 +632,9 @@ sub _find_best_index {
|
||||
push @possible_indexes, $want_index;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# still no best index?
|
||||
# prefer unique index. otherwise put in candidates array.
|
||||
# prefer unique index. otherwise put in candidates array.
|
||||
if (!$best_index) {
|
||||
PTDEBUG && _d('Auto-selecting best index');
|
||||
foreach my $index ( $tp->sort_indexes($tbl_struct) ) {
|
||||
@@ -648,7 +648,7 @@ sub _find_best_index {
|
||||
}
|
||||
}
|
||||
|
||||
# choose the one with best cardinality
|
||||
# choose the one with best cardinality
|
||||
if ( !$best_index && @possible_indexes ) {
|
||||
PTDEBUG && _d('No PRIMARY or unique indexes;',
|
||||
'will use index with highest cardinality');
|
||||
@@ -740,7 +740,7 @@ sub _prepare_sths {
|
||||
return;
|
||||
}
|
||||
|
||||
sub _get_bounds {
|
||||
sub _get_bounds {
|
||||
my ($self) = @_;
|
||||
|
||||
if ( $self->{one_nibble} ) {
|
||||
@@ -754,7 +754,7 @@ sub _get_bounds {
|
||||
|
||||
# Get the real first lower boundary.
|
||||
$self->{first_lower} = $dbh->selectrow_arrayref($self->{first_lb_sql});
|
||||
PTDEBUG && _d('First lower boundary:', Dumper($self->{first_lower}));
|
||||
PTDEBUG && _d('First lower boundary:', Dumper($self->{first_lower}));
|
||||
|
||||
# The next boundary is the first lower boundary. If resuming,
|
||||
# this should be something > the real first lower boundary and
|
||||
@@ -772,9 +772,9 @@ sub _get_bounds {
|
||||
}
|
||||
}
|
||||
else {
|
||||
$self->{next_lower} = $self->{first_lower};
|
||||
$self->{next_lower} = $self->{first_lower};
|
||||
}
|
||||
PTDEBUG && _d('Next lower boundary:', Dumper($self->{next_lower}));
|
||||
PTDEBUG && _d('Next lower boundary:', Dumper($self->{next_lower}));
|
||||
|
||||
if ( !$self->{next_lower} ) {
|
||||
# This happens if we resume from the end of the table, or if the
|
||||
@@ -915,7 +915,7 @@ sub _next_boundaries {
|
||||
$self->{upper} = $dbh->selectrow_arrayref($self->{last_ub_sql});
|
||||
PTDEBUG && _d('Last upper boundary:', Dumper($self->{upper}));
|
||||
$self->{no_more_boundaries} = 1; # for next call
|
||||
|
||||
|
||||
# OobNibbleIterator needs to know the last upper boundary.
|
||||
$self->{last_upper} = $self->{upper};
|
||||
}
|
||||
|
@@ -96,7 +96,7 @@ sub new {
|
||||
. $tail_sql
|
||||
. " /*past upper chunk*/";
|
||||
PTDEBUG && _d('Past upper statement:', $past_upper_sql);
|
||||
|
||||
|
||||
my $explain_past_upper_sql
|
||||
= "EXPLAIN SELECT "
|
||||
. ($args{past_select}
|
||||
@@ -146,7 +146,7 @@ sub statements {
|
||||
# Get the parent's statements.
|
||||
my $sths = $self->SUPER::statements();
|
||||
|
||||
# Add our special statements.
|
||||
# Add our special statements.
|
||||
$sths->{past_lower_boundary} = $self->{past_lower_sth};
|
||||
$sths->{past_upper_boundary} = $self->{past_upper_sth};
|
||||
|
||||
|
@@ -95,7 +95,7 @@ sub new {
|
||||
rules => [], # desc of rules for --help
|
||||
mutex => [], # rule: opts are mutually exclusive
|
||||
atleast1 => [], # rule: at least one opt is required
|
||||
disables => {}, # rule: opt disables other opts
|
||||
disables => {}, # rule: opt disables other opts
|
||||
defaults_to => {}, # rule: opt defaults to value of other opt
|
||||
DSNParser => undef,
|
||||
default_files => [
|
||||
@@ -305,7 +305,7 @@ sub _pod_to_specs {
|
||||
}
|
||||
|
||||
push @specs, {
|
||||
spec => $self->{parse_attributes}->($self, $option, \%attribs),
|
||||
spec => $self->{parse_attributes}->($self, $option, \%attribs),
|
||||
desc => $para
|
||||
. (defined $attribs{default} ? " (default $attribs{default})" : ''),
|
||||
group => ($attribs{'group'} ? $attribs{'group'} : 'default'),
|
||||
@@ -426,7 +426,7 @@ sub _parse_specs {
|
||||
$self->{opts}->{$long} = $opt;
|
||||
}
|
||||
else { # It's an option rule, not a spec.
|
||||
PTDEBUG && _d('Parsing rule:', $opt);
|
||||
PTDEBUG && _d('Parsing rule:', $opt);
|
||||
push @{$self->{rules}}, $opt;
|
||||
my @participants = $self->_get_participants($opt);
|
||||
my $rule_ok = 0;
|
||||
@@ -478,7 +478,7 @@ sub _parse_specs {
|
||||
PTDEBUG && _d('Option', $long, 'disables', @participants);
|
||||
}
|
||||
|
||||
return;
|
||||
return;
|
||||
}
|
||||
|
||||
# Sub: _get_participants
|
||||
@@ -599,9 +599,9 @@ sub _set_option {
|
||||
# later by <get()>, <got()>, and <set()>. Call <get_specs()>
|
||||
# before calling this sub.
|
||||
sub get_opts {
|
||||
my ( $self ) = @_;
|
||||
my ( $self ) = @_;
|
||||
|
||||
# Reset opts.
|
||||
# Reset opts.
|
||||
foreach my $long ( keys %{$self->{opts}} ) {
|
||||
$self->{opts}->{$long}->{got} = 0;
|
||||
$self->{opts}->{$long}->{value}
|
||||
@@ -749,7 +749,7 @@ sub _check_opts {
|
||||
else {
|
||||
$err = join(', ',
|
||||
map { "--$self->{opts}->{$_}->{long}" }
|
||||
grep { $_ }
|
||||
grep { $_ }
|
||||
@restricted_opts[0..scalar(@restricted_opts) - 2]
|
||||
)
|
||||
. ' or --'.$self->{opts}->{$restricted_opts[-1]}->{long};
|
||||
@@ -759,7 +759,7 @@ sub _check_opts {
|
||||
}
|
||||
|
||||
}
|
||||
elsif ( $opt->{is_required} ) {
|
||||
elsif ( $opt->{is_required} ) {
|
||||
$self->save_error("Required option --$long must be specified");
|
||||
}
|
||||
|
||||
@@ -1197,7 +1197,7 @@ sub _read_config_file {
|
||||
# This is the default MAGIC_foo_table:
|
||||
#
|
||||
# CREATE TABLE `foo` (i INT)
|
||||
#
|
||||
#
|
||||
# Blah blah...
|
||||
# (end code)
|
||||
# Then to get that CREATE TABLE, you pass "MAGIC_foo_table" as the
|
||||
@@ -1234,7 +1234,7 @@ sub read_para_after {
|
||||
sub clone {
|
||||
my ( $self ) = @_;
|
||||
|
||||
# Deep-copy contents of hashrefs; do not just copy the refs.
|
||||
# Deep-copy contents of hashrefs; do not just copy the refs.
|
||||
my %clone = map {
|
||||
my $hashref = $self->{$_};
|
||||
my $val_copy = {};
|
||||
@@ -1253,7 +1253,7 @@ sub clone {
|
||||
$clone{$scalar} = $self->{$scalar};
|
||||
}
|
||||
|
||||
return bless \%clone;
|
||||
return bless \%clone;
|
||||
}
|
||||
|
||||
sub _parse_size {
|
||||
|
@@ -158,7 +158,7 @@ sub output {
|
||||
|
||||
my $output = '';
|
||||
{
|
||||
if ( $file ) {
|
||||
if ( $file ) {
|
||||
open *output_fh, '>', $file
|
||||
or die "Cannot open file $file: $OS_ERROR";
|
||||
}
|
||||
@@ -600,7 +600,7 @@ sub no_diff {
|
||||
|
||||
# diff returns 0 if there were no differences,
|
||||
# so !0 = 1 = no diff in our testing parlance.
|
||||
$retval = $retval >> 8;
|
||||
$retval = $retval >> 8;
|
||||
|
||||
if ( $retval ) {
|
||||
diag($out);
|
||||
@@ -762,7 +762,7 @@ sub full_output {
|
||||
|
||||
unlink $file;
|
||||
unlink $file2;
|
||||
|
||||
|
||||
return ($output, $status);
|
||||
}
|
||||
|
||||
|
@@ -87,7 +87,7 @@ sub find_cluster_nodes {
|
||||
# useful for safety.
|
||||
# TODO this fails with a strange error.
|
||||
#$dp->fill_in_dsn($dbh, $dsn);
|
||||
|
||||
|
||||
my $sql = q{SHOW STATUS LIKE 'wsrep\_incoming\_addresses'};
|
||||
PTDEBUG && _d($sql);
|
||||
my (undef, $addresses) = $dbh->selectrow_array($sql);
|
||||
@@ -174,7 +174,7 @@ sub autodetect_nodes {
|
||||
my $new_nodes = [];
|
||||
|
||||
return $new_nodes unless @$nodes;
|
||||
|
||||
|
||||
for my $node ( @$nodes ) {
|
||||
my $nodes_found = $self->find_cluster_nodes(
|
||||
dbh => $node->dbh(),
|
||||
@@ -208,12 +208,12 @@ sub autodetect_nodes {
|
||||
# If some of the new slaves is a cluster node, autodetect new nodes
|
||||
# from there too.
|
||||
my @new_slave_nodes = grep { $self->is_cluster_node($_) } @$new_slaves;
|
||||
|
||||
|
||||
my $slaves_of_slaves = $self->autodetect_nodes(
|
||||
%args,
|
||||
nodes => \@new_slave_nodes,
|
||||
);
|
||||
|
||||
|
||||
my @autodetected_nodes = ( @$new_nodes, @$new_slaves, @$slaves_of_slaves );
|
||||
return \@autodetected_nodes;
|
||||
}
|
||||
|
@@ -152,7 +152,7 @@ sub output {
|
||||
|
||||
my $output = '';
|
||||
{
|
||||
if ( $file ) {
|
||||
if ( $file ) {
|
||||
open *output_fh, '>', $file
|
||||
or die "Cannot open file $file: $OS_ERROR";
|
||||
}
|
||||
@@ -400,7 +400,7 @@ sub test_log_parser {
|
||||
misc => $args{misc},
|
||||
oktorun => $args{oktorun},
|
||||
);
|
||||
while ( my $e = $p->parse_event(%parser_args) ) {
|
||||
while ( my $e = $p->parse_event(%parser_args) ) {
|
||||
push @e, $e;
|
||||
}
|
||||
close $fh;
|
||||
@@ -637,7 +637,7 @@ sub no_diff {
|
||||
|
||||
# diff returns 0 if there were no differences,
|
||||
# so !0 = 1 = no diff in our testing parlance.
|
||||
$retval = $retval >> 8;
|
||||
$retval = $retval >> 8;
|
||||
|
||||
if ( $retval ) {
|
||||
if ( $ENV{UPDATE_SAMPLES} || $args{update_sample} ) {
|
||||
@@ -653,7 +653,7 @@ sub no_diff {
|
||||
if ( $res_file ne $tmp_file ) {
|
||||
unlink $res_file if -f $res_file;
|
||||
}
|
||||
|
||||
|
||||
if ( $cmp_file ne $expected_output ) {
|
||||
unlink $cmp_file if -f $cmp_file;
|
||||
}
|
||||
@@ -802,7 +802,7 @@ sub full_output {
|
||||
|
||||
unlink $file;
|
||||
unlink $file2;
|
||||
|
||||
|
||||
return ($output, $status);
|
||||
}
|
||||
|
||||
|
@@ -68,7 +68,7 @@ sub new {
|
||||
};
|
||||
return bless $self, $class;
|
||||
}
|
||||
|
||||
|
||||
sub get_items {
|
||||
my ( $self, $section ) = @_;
|
||||
return $section ? $self->{items}->{$section} : $self->{items};
|
||||
@@ -110,7 +110,7 @@ sub parse_from_file {
|
||||
# these command are passed to textblock().
|
||||
sub command {
|
||||
my ( $self, $cmd, $name ) = @_;
|
||||
|
||||
|
||||
$name =~ s/\s+\Z//m; # Remove \n and blank line after name.
|
||||
|
||||
if ( $cmd eq 'head1' ) {
|
||||
@@ -145,7 +145,7 @@ sub command {
|
||||
else {
|
||||
$self->{current_section} = '';
|
||||
}
|
||||
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@@ -35,8 +35,8 @@ $Data::Dumper::Quotekeys = 0;
|
||||
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
|
||||
use constant {
|
||||
# 0-7 are the standard processlist columns.
|
||||
ID => 0,
|
||||
USER => 1,
|
||||
ID => 0,
|
||||
USER => 1,
|
||||
HOST => 2,
|
||||
DB => 3,
|
||||
COMMAND => 4,
|
||||
@@ -265,7 +265,7 @@ sub parse_event {
|
||||
# the query has restarted. I.e. the new start time is after
|
||||
# the previous start time.
|
||||
my $ms = $self->{MasterSlave};
|
||||
|
||||
|
||||
my $is_repl_thread = $ms->is_replication_thread({
|
||||
Command => $curr->[COMMAND],
|
||||
User => $curr->[USER],
|
||||
@@ -310,7 +310,7 @@ sub parse_event {
|
||||
];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
PTDEBUG && _d('New cxn', $curr->[ID]);
|
||||
if ( $curr->[INFO] && defined $curr->[TIME] ) {
|
||||
@@ -340,7 +340,7 @@ sub parse_event {
|
||||
$self->make_event($prev, $time);
|
||||
delete $active_cxn->{$prev->[ID]};
|
||||
}
|
||||
elsif ( ($curr_cxn->{$prev->[ID]}->[COMMAND] || "") eq 'Sleep'
|
||||
elsif ( ($curr_cxn->{$prev->[ID]}->[COMMAND] || "") eq 'Sleep'
|
||||
|| !$curr_cxn->{$prev->[ID]}->[STATE]
|
||||
|| !$curr_cxn->{$prev->[ID]}->[INFO] ) {
|
||||
PTDEBUG && _d('cxn', $prev->[ID], 'became idle');
|
||||
@@ -529,7 +529,7 @@ sub find {
|
||||
push @{$self->{_reasons_for_matching}->{$query} ||= []}, $reason;
|
||||
$matched++;
|
||||
}
|
||||
|
||||
|
||||
PROPERTY:
|
||||
foreach my $property ( qw(Id User Host db State Command Info) ) {
|
||||
my $filter = "_find_match_$property";
|
||||
|
@@ -86,7 +86,7 @@ sub parse_event {
|
||||
$args{stats}->{events_parsed}++ if $args{stats};
|
||||
} sort { $a->{seq} <=> $b->{seq} }
|
||||
@{$session->{client_packets}};
|
||||
|
||||
|
||||
map {
|
||||
$event = $self->_parse_packet($_, $args{misc});
|
||||
$args{stats}->{events_parsed}++ if $args{stats};
|
||||
|
@@ -46,7 +46,7 @@ use constant MAX_STRING_LENGTH => 10;
|
||||
{ local $EVAL_ERROR; eval { require ReportFormatter } };
|
||||
|
||||
# Sub: new
|
||||
#
|
||||
#
|
||||
# Parameters:
|
||||
# %args - Required arguments
|
||||
#
|
||||
@@ -171,7 +171,7 @@ sub print_reports {
|
||||
my $last_report;
|
||||
|
||||
foreach my $report ( @$reports ) {
|
||||
PTDEBUG && _d('Printing', $report, 'report');
|
||||
PTDEBUG && _d('Printing', $report, 'report');
|
||||
my $report_output = $self->$report(%args);
|
||||
if ( $report_output ) {
|
||||
print "\n"
|
||||
@@ -306,7 +306,7 @@ sub header {
|
||||
my $store = $results->{globals}->{$attrib};
|
||||
my $metrics = $ea->stats()->{globals}->{$attrib};
|
||||
my $func = $attrib =~ m/time|wait$/ ? \µ_t : \&shorten;
|
||||
my @values = (
|
||||
my @values = (
|
||||
@{$store}{qw(sum min max)},
|
||||
$store->{sum} / $store->{cnt},
|
||||
@{$metrics}{qw(pct_95 stddev median)},
|
||||
@@ -327,7 +327,7 @@ sub header {
|
||||
next unless exists $results->{globals}->{$attrib};
|
||||
|
||||
my $store = $results->{globals}->{$attrib};
|
||||
if ( $store->{sum} > 0 ) {
|
||||
if ( $store->{sum} > 0 ) {
|
||||
push @result,
|
||||
sprintf $self->{bool_format},
|
||||
$self->make_label($attrib), $self->bool_percents($store);
|
||||
@@ -476,7 +476,7 @@ sub query_report {
|
||||
|
||||
my $partitions_msg = $self->{no_partitions} ? '' : '/*!50100 PARTITIONS*/';
|
||||
if ( $groupby eq 'fingerprint' ) {
|
||||
# Shorten it if necessary (issue 216 and 292).
|
||||
# Shorten it if necessary (issue 216 and 292).
|
||||
my $samp_query = $qr->shorten($vals->{samp_query}, $self->{options}->{shorten})
|
||||
if $self->{options}->{shorten};
|
||||
|
||||
@@ -503,12 +503,12 @@ sub query_report {
|
||||
$report .= "$samp_query${mark}\n";
|
||||
}
|
||||
else {
|
||||
$report .= "# EXPLAIN $partitions_msg\n$samp_query${mark}\n";
|
||||
$report .= "# EXPLAIN $partitions_msg\n$samp_query${mark}\n";
|
||||
$report .= $self->explain_report($samp_query, $vals->{default_db});
|
||||
}
|
||||
}
|
||||
else {
|
||||
$report .= "$samp_query${mark}\n";
|
||||
$report .= "$samp_query${mark}\n";
|
||||
my $converted = $qr->convert_to_select($samp_query);
|
||||
if ( $converted
|
||||
&& $converted =~ m/^[\(\s]*select/i ) {
|
||||
@@ -867,7 +867,7 @@ sub profile {
|
||||
$qr->distill($samp_query, %{$args{distill_args}}) : $item,
|
||||
id => $groupby eq 'fingerprint' ? make_checksum($item) : '',
|
||||
vmr => ($query_time->{stddev}**2) / ($query_time->{avg} || 1),
|
||||
);
|
||||
);
|
||||
|
||||
push @profiles, \%profile;
|
||||
}
|
||||
@@ -1006,7 +1006,7 @@ sub prepared {
|
||||
}
|
||||
|
||||
push @prepared, {
|
||||
prep_r => $prep_r,
|
||||
prep_r => $prep_r,
|
||||
prep_cnt => $prep_cnt,
|
||||
exec_r => $exec_r,
|
||||
exec_cnt => $exec_cnt,
|
||||
@@ -1057,7 +1057,7 @@ sub make_global_header {
|
||||
my ( $self ) = @_;
|
||||
my @lines;
|
||||
|
||||
# First line:
|
||||
# First line:
|
||||
# Attribute total min max avg 95% stddev median
|
||||
push @lines,
|
||||
sprintf $self->{num_format}, "Attribute", '', @{$self->global_headers()};
|
||||
@@ -1140,7 +1140,7 @@ sub bool_percents {
|
||||
# Does pretty-printing for lists of strings like users, hosts, db.
|
||||
sub format_string_list {
|
||||
my ( $self, $attrib, $vals, $class_cnt ) = @_;
|
||||
|
||||
|
||||
# Only class result values have unq. So if unq doesn't exist,
|
||||
# then we've been given global values.
|
||||
if ( !exists $vals->{unq} ) {
|
||||
|
@@ -174,21 +174,21 @@ sub fingerprint {
 && return $query;

 # -----------------------------------------------------------
-# Remove quoted strings
+# Remove quoted strings
 # -----------------------------------------------------------
-$query =~ s/([^\\])(\\')/$1/sg;
-$query =~ s/([^\\])(\\")/$1/sg;
-$query =~ s/\\\\//sg;
-$query =~ s/\\'//sg;
-$query =~ s/\\"//sg;
-$query =~ s/([^\\])(".*?[^\\]?")/$1?/sg;
-$query =~ s/([^\\])('.*?[^\\]?')/$1?/sg;
+$query =~ s/([^\\])(\\')/$1/sg;
+$query =~ s/([^\\])(\\")/$1/sg;
+$query =~ s/\\\\//sg;
+$query =~ s/\\'//sg;
+$query =~ s/\\"//sg;
+$query =~ s/([^\\])(".*?[^\\]?")/$1?/sg;
+$query =~ s/([^\\])('.*?[^\\]?')/$1?/sg;
 # -----------------------------------------------------------

-$query =~ s/\bfalse\b|\btrue\b/?/isg; # boolean values
+$query =~ s/\bfalse\b|\btrue\b/?/isg; # boolean values

-# MD5 checksums which are always 32 hex chars
-if ( $self->{match_md5_checksums} ) {
+# MD5 checksums which are always 32 hex chars
+if ( $self->{match_md5_checksums} ) {
 $query =~ s/([._-])[a-f0-9]{32}/$1?/g;
 }

@@ -204,7 +204,7 @@ sub fingerprint {

 # Clean up leftovers
 if ( $self->{match_md5_checksums} ) {
-$query =~ s/[xb+-]\?/?/g;
+$query =~ s/[xb+-]\?/?/g;
 }
 else {
 $query =~ s/[xb.+-]\?/?/g;
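The substitutions above are the literal-stripping part of fingerprinting: quoted strings, booleans, and MD5-looking hex runs are all collapsed to ?, so queries that differ only in literal values end up in the same class. A small usage sketch follows; it assumes these hunks belong to the QueryRewriter module and that new() needs no required arguments here, and the commented output is only an approximation of what the rules above produce, not a captured run.

# Sketch: queries differing only in literals map to one fingerprint class.
my $qr = QueryRewriter->new();    # assumed constructor usage
my $f1 = $qr->fingerprint("SELECT * FROM tbl WHERE name = 'bob'   AND flag = TRUE");
my $f2 = $qr->fingerprint("SELECT * FROM tbl WHERE name = 'alice' AND flag = FALSE");
# Both collapse the string and boolean literals, roughly:
#   select * from tbl where name = ? and flag = ?
print "same query class\n" if $f1 eq $f2;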
@@ -270,11 +270,11 @@ sub distill_verbs {
|
||||
return $query;
|
||||
}
|
||||
|
||||
# All other, more complex verbs.
|
||||
# All other, more complex verbs.
|
||||
$query = $self->strip_comments($query);
|
||||
|
||||
# SHOW statements are either 2 or 3 words: SHOW A (B), where A and B
|
||||
# are words; B is optional. E.g. "SHOW TABLES" or "SHOW SLAVE STATUS".
|
||||
# are words; B is optional. E.g. "SHOW TABLES" or "SHOW SLAVE STATUS".
|
||||
# There's a few common keywords that may show up in place of A, so we
|
||||
# remove them first. Then there's some keywords that signify extra clauses
|
||||
# that may show up in place of B and since these clauses are at the
|
||||
@@ -412,8 +412,8 @@ sub distill {
|
||||
else {
|
||||
# For everything else, distill the tables.
|
||||
my @tables = $self->__distill_tables($query, $table, %args);
|
||||
$query = join(q{ }, $verbs, @tables);
|
||||
}
|
||||
$query = join(q{ }, $verbs, @tables);
|
||||
}
|
||||
}
|
||||
|
||||
if ( $args{trf} ) {
|
||||
|
@@ -112,7 +112,7 @@ sub split_unquote {
|
||||
s/`\z//;
|
||||
s/``/`/g;
|
||||
}
|
||||
|
||||
|
||||
return ($db, $tbl);
|
||||
}
|
||||
|
||||
|
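For context, the unquoting above strips backtick quoting before a db.tbl pair is returned. A rough, self-contained sketch of the same idea (the helper name and sample identifier are made up; the real sub handles more edge cases):

use strict;
use warnings;

# Split a possibly backtick-quoted "db.tbl" identifier and unquote each part.
sub split_db_tbl {
    my ($name) = @_;
    my ($db, $tbl) = $name =~ m/^(?:(.+?)\.)?(.+)$/;   # db part is optional
    for ($db, $tbl) {
        next unless defined $_;
        s/\A`//;     # leading backtick
        s/`\z//;     # trailing backtick
        s/``/`/g;    # collapse doubled (escaped) backticks
    }
    return ($db, $tbl);
}

my ($db, $tbl) = split_db_tbl('`sakila`.`film_actor`');
printf "%s.%s\n", ($db // '?'), $tbl;   # sakila.film_actor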
@@ -93,7 +93,7 @@ my %modes = (
|
||||
}
|
||||
|
||||
sub cbreak {
|
||||
my ($lflag) = $_[0] || $noecho;
|
||||
my ($lflag) = $_[0] || $noecho;
|
||||
$term->setlflag($lflag);
|
||||
$term->setcc( VTIME, 1 );
|
||||
$term->setattr( $fd_stdin, TCSANOW );
|
||||
|
@@ -27,7 +27,7 @@
|
||||
# Internally, all column widths are *first* treated as percentages of the
|
||||
# line width. Even if a column is specified with width=>N where N is some
|
||||
# length of characters, this is converted to a percent/line width (rounded up).
|
||||
#
|
||||
#
|
||||
# Columns specified with width=>N or width_pct=>P (where P is some percent
|
||||
# of *total* line width, not remaining line width when used with other width=>N
|
||||
# columns) are fixed. You get exactly what you specify even if this results
|
||||
@@ -50,7 +50,7 @@
|
||||
# Extra space is distributed evenly among auto-width cols with print widths
|
||||
# less than the column's max val or header/name. This widens auto-width cols
|
||||
# to either show longer values or truncate the column header/name less.
|
||||
#
|
||||
#
|
||||
# After these adjustments, get_report() calls _truncate_headers() and
|
||||
# _truncate_line_values(). These truncate output to the columns' final,
|
||||
# calculated widths.
|
||||
|
@@ -150,7 +150,7 @@ sub next {
|
||||
|
||||
$results->{query} = $query;
|
||||
$results->{rows} = $rows;
|
||||
|
||||
|
||||
if ( my $pr = $self->_progress ) {
|
||||
$pr->update(sub { tell $_query_fh });
|
||||
}
|
||||
|
@@ -128,10 +128,10 @@ sub save {
|
||||
}
|
||||
else {
|
||||
# Save rows, if any (i.e. if it's a SELECT statement).
|
||||
# *except* if it's a SELECT...INTO (issue lp:1421781)
|
||||
# *except* if it's a SELECT...INTO (issue lp:1421781)
|
||||
my $rows;
|
||||
if ( my $sth = $results->{sth} ) {
|
||||
if ( $event->{arg} =~ m/(?:^\s*SELECT|(?:\*\/\s*SELECT))/i
|
||||
if ( $event->{arg} =~ m/(?:^\s*SELECT|(?:\*\/\s*SELECT))/i
|
||||
&& $event->{arg} !~ /INTO\s*(?:OUTFILE|DUMPFILE|@)/ ) {
|
||||
$rows = $sth->fetchall_arrayref();
|
||||
}
|
||||
|
@@ -71,7 +71,7 @@ sub make_row_checksum {
|
||||
# https://bugs.launchpad.net/percona-toolkit/+bug/1016131
|
||||
die "all columns are excluded by --columns or --ignore-columns"
|
||||
unless @{$cols->{select}};
|
||||
|
||||
|
||||
# Prepend columns to query, resulting in "col1, col2, FUNC(..col1, col2...)",
|
||||
# unless caller says not to. The only caller that says not to is
|
||||
# make_chunk_checksum() which uses this row checksum as part of a larger
|
||||
@@ -80,7 +80,7 @@ sub make_row_checksum {
|
||||
my $query;
|
||||
if ( !$args{no_cols} ) {
|
||||
$query = join(', ',
|
||||
map {
|
||||
map {
|
||||
my $col = $_;
|
||||
if ( $col =~ m/UNIX_TIMESTAMP/ ) {
|
||||
# Alias col name back to itself else its name becomes
|
||||
@@ -152,7 +152,7 @@ sub make_row_checksum {
|
||||
# func - Hash function name
|
||||
# crc_width - CRC width
|
||||
# crc_type - CRC type
|
||||
#
|
||||
#
|
||||
# Returns:
|
||||
# Column list for SELECT
|
||||
sub make_chunk_checksum {
|
||||
@@ -257,7 +257,7 @@ sub get_crc_args {
|
||||
my $func = $args{func} || $self->_get_hash_func(%args);
|
||||
my $crc_width = $args{crc_width}|| $self->_get_crc_width(%args, func=>$func);
|
||||
my $crc_type = $args{crc_type} || $self->_get_crc_type(%args, func=>$func);
|
||||
my $opt_slice;
|
||||
my $opt_slice;
|
||||
if ( $args{dbh} && $crc_type !~ m/int$/ ) {
|
||||
$opt_slice = $self->_optimize_xor(%args, func=>$func);
|
||||
}
|
||||
@@ -477,8 +477,8 @@ sub find_replication_differences {
|
||||
}
|
||||
my ($dbh, $repl_table) = @args{@required_args};
|
||||
|
||||
|
||||
my $tries = $self->{'OptionParser'}->get('replicate-check-retries') || 1;
|
||||
|
||||
my $tries = $self->{'OptionParser'}->get('replicate-check-retries') || 1;
|
||||
my $diffs;
|
||||
while ($tries--) {
|
||||
my $sql
|
||||
@@ -497,7 +497,7 @@ sub find_replication_differences {
|
||||
if (!@$diffs || !$tries) { # if no differences are found OR we are out of tries left...
|
||||
last; # get out now
|
||||
}
|
||||
sleep 1;
|
||||
sleep 1;
|
||||
}
|
||||
return $diffs;
|
||||
}
|
||||
|
@@ -423,7 +423,7 @@ sub parse_select {
|
||||
# only statement with optional keywords at the end. Also, these
|
||||
# appear to be the only keywords with spaces instead of _.
|
||||
my @keywords;
|
||||
my $final_keywords = qr/(FOR UPDATE|LOCK IN SHARE MODE)/i;
|
||||
my $final_keywords = qr/(FOR UPDATE|LOCK IN SHARE MODE)/i;
|
||||
1 while $query =~ s/\s+$final_keywords/(push @keywords, $1), ''/gie;
|
||||
|
||||
my $keywords = qr/(
|
||||
@@ -678,7 +678,7 @@ sub parse_table_reference {
|
||||
# * not "fully" tested because the possibilities are infinite
|
||||
#
|
||||
# It works in four steps; let's take this WHERE clause as an example:
|
||||
#
|
||||
#
|
||||
# i="x and y" or j in ("and", "or") and x is not null or a between 1 and 10 and sz="this 'and' foo"
|
||||
#
|
||||
# The first step splits the string on and|or, the only two keywords I'm
|
||||
@@ -704,7 +704,7 @@ sub parse_table_reference {
|
||||
# The third step runs through the list of pred frags backwards and joins
|
||||
# the current frag to the preceding frag if it does not have an operator.
|
||||
# The result is:
|
||||
#
|
||||
#
|
||||
# PREDICATE FRAGMENT OPERATOR
|
||||
# ================================ ========
|
||||
# i="x and y" Y
|
||||
@@ -721,7 +721,7 @@ sub parse_table_reference {
|
||||
# The fourth step is similar but not shown: pred frags with unbalanced ' or "
|
||||
# are joined to the preceding pred frag. This fixes cases where a pred frag
|
||||
# has multiple and|or in a string value; e.g. "foo and bar or dog".
|
||||
#
|
||||
#
|
||||
# After the pred frags are complete, the parts of these predicates are parsed
|
||||
# and returned in an arrayref of hashrefs like:
|
||||
#
|
||||
@@ -858,7 +858,7 @@ sub parse_where {
|
||||
$op =~ s/\s+$//;
|
||||
}
|
||||
$val =~ s/^\s+//;
|
||||
|
||||
|
||||
# No unquoted value ends with ) except FUNCTION(...)
|
||||
if ( ($op || '') !~ m/IN/i && $val !~ m/^\w+\([^\)]+\)$/ ) {
|
||||
$val =~ s/\)+$//;
|
||||
@@ -1326,7 +1326,7 @@ sub parse_identifier {
|
||||
else {
|
||||
die "Invalid number of parts in $type reference: $ident";
|
||||
}
|
||||
|
||||
|
||||
if ( $self->{Schema} ) {
|
||||
if ( $type eq 'column' && (!$ident_struct{tbl} || !$ident_struct{db}) ) {
|
||||
my $qcol = $self->{Schema}->find_column(%ident_struct);
|
||||
|
@@ -130,7 +130,7 @@ sub create_dbs {
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
sub get_dbh_for {
|
||||
my ( $self, $server, $cxn_ops, $user ) = @_;
|
||||
_check_server($server);
|
||||
@@ -192,10 +192,10 @@ sub wipe_clean {
|
||||
# the DROP commands will just hang forever.
|
||||
my @cxns = @{$dbh->selectall_arrayref('SHOW FULL PROCESSLIST', {Slice => {}})};
|
||||
foreach my $cxn ( @cxns ) {
|
||||
if ((
|
||||
if ((
|
||||
(($cxn->{user}||'') eq 'msandbox' && ($cxn->{command}||'') eq 'Sleep')
|
||||
|| (($cxn->{User}||'') eq 'msandbox' && ($cxn->{Command}||'') eq 'Sleep')
|
||||
) && $cxn->{db}
|
||||
) && $cxn->{db}
|
||||
) {
|
||||
my $id = $cxn->{id} ? $cxn->{id} : $cxn->{Id};
|
||||
my $sql = "KILL $id /* db: $cxn->{db} */";
|
||||
@@ -505,7 +505,7 @@ sub start_sandbox {
|
||||
my $first_node = $args{first_node} ? $port_for{$args{first_node}} : '';
|
||||
my $out = `$env $trunk/sandbox/start-sandbox cluster $port $first_node`;
|
||||
die $out if $CHILD_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
my $dbh = $self->get_dbh_for($server, $args{cxn_opts});
|
||||
my $dsn = $self->dsn_for($server);
|
||||
|
@@ -165,7 +165,7 @@ sub parse_event {
|
||||
$host ||= $ip; # sometimes host is missing when using skip-name-resolve (LP #issue 1262456)
|
||||
push @properties, 'user', $user, 'host', $host, 'ip', $ip;
|
||||
# 5.6 has the thread id on the User@Host line
|
||||
if ( $thread_id ) {
|
||||
if ( $thread_id ) {
|
||||
push @properties, 'Thread_id', $thread_id;
|
||||
}
|
||||
++$got_uh;
|
||||
@@ -181,7 +181,7 @@ sub parse_event {
|
||||
$host ||= $ip; # sometimes host is missing when using skip-name-resolve (LP #issue 1262456)
|
||||
push @properties, 'user', $user, 'host', $host, 'ip', $ip;
|
||||
# 5.6 has the thread id on the User@Host line
|
||||
if ( $thread_id ) {
|
||||
if ( $thread_id ) {
|
||||
push @properties, 'Thread_id', $thread_id;
|
||||
}
|
||||
++$got_uh;
|
||||
|
@@ -73,7 +73,7 @@ sub write {
|
||||
map { $_ || 0 }
|
||||
@{$event}{qw(InnoDB_IO_r_ops InnoDB_IO_r_bytes InnoDB_IO_r_wait InnoDB_rec_lock_wait InnoDB_queue_wait InnoDB_pages_distinct)};
|
||||
|
||||
}
|
||||
}
|
||||
else {
|
||||
printf $fh "# No InnoDB statistics available for this query\n";
|
||||
}
|
||||
|
@@ -19,7 +19,7 @@
|
||||
# ###########################################################################
|
||||
{
|
||||
# Package: SqlModes
|
||||
# SqlModes is a simple module that helps add/delete elements to the sql_mode
|
||||
# SqlModes is a simple module that helps add/delete elements to the sql_mode
|
||||
# variable in MySql.
|
||||
package SqlModes;
|
||||
|
||||
@@ -36,7 +36,7 @@ use Data::Dumper;
|
||||
# dbh - Database where to apply changes
|
||||
#
|
||||
# Returns:
|
||||
# SqlModes object
|
||||
# SqlModes object
|
||||
|
||||
sub new {
|
||||
my ( $class, $dbh, %args ) = @_;
|
||||
@@ -45,7 +45,7 @@ sub new {
|
||||
my $global = $args{'global'} ? 'GLOBAL' : '';
|
||||
|
||||
my $self = {
|
||||
dbh => $dbh,
|
||||
dbh => $dbh,
|
||||
global => $global,
|
||||
original_modes_string => '',
|
||||
};
|
||||
@@ -57,11 +57,11 @@ sub new {
|
||||
}
|
||||
|
||||
|
||||
# Sub: set_mode_string
|
||||
# sets sql_mode in traditional csv format
|
||||
# Sub: set_mode_string
|
||||
# sets sql_mode in traditional csv format
|
||||
#
|
||||
# Required Arguments:
|
||||
# string of valid formats in csv formta (or null string)
|
||||
# string of valid formats in csv formta (or null string)
|
||||
#
|
||||
# Returns:
|
||||
# 1 if successful, 0 if error.
|
||||
@@ -77,10 +77,10 @@ sub set_mode_string {
|
||||
}
|
||||
|
||||
# Sub: add
|
||||
# adds one or more modes
|
||||
# adds one or more modes
|
||||
#
|
||||
# Required Arguments:
|
||||
# list of sql modes
|
||||
# list of sql modes
|
||||
#
|
||||
# Returns:
|
||||
# 1 if successful, 0 if error.
|
||||
@@ -96,7 +96,7 @@ sub add {
|
||||
PTDEBUG && _d('adding sql_mode: ', $mode);
|
||||
}
|
||||
|
||||
my $sql_mode_string = join ",", keys %$curr_modes;
|
||||
my $sql_mode_string = join ",", keys %$curr_modes;
|
||||
|
||||
$self->{dbh}->do("set $self->{global} sql_mode = '$sql_mode_string'") || return 0;
|
||||
|
||||
@@ -108,7 +108,7 @@ sub add {
|
||||
# remove one or more modes
|
||||
#
|
||||
# Required Arguments:
|
||||
# list of sql modes
|
||||
# list of sql modes
|
||||
#
|
||||
# Returns:
|
||||
# 1 if successful, 0 if error.
|
||||
@@ -124,7 +124,7 @@ sub del {
|
||||
PTDEBUG && _d('deleting sql_mode: ', $mode);
|
||||
}
|
||||
|
||||
my $sql_mode_string = join ",", keys %$curr_modes;
|
||||
my $sql_mode_string = join ",", keys %$curr_modes;
|
||||
|
||||
$self->{dbh}->do("SET $self->{global} sql_mode = '$sql_mode_string'") || return 0;
|
||||
|
||||
@@ -132,11 +132,11 @@ sub del {
|
||||
return $curr_modes || 1;
|
||||
}
|
||||
|
||||
# Sub: has_mode
|
||||
# checks if a mode is on. (exists within the sql_mode string)
|
||||
# Sub: has_mode
|
||||
# checks if a mode is on. (exists within the sql_mode string)
|
||||
#
|
||||
# Required Arguments:
|
||||
# 1 mode string
|
||||
# 1 mode string
|
||||
#
|
||||
# Returns:
|
||||
# 1 = yes , 0 = no
|
||||
@@ -147,14 +147,14 @@ sub has_mode {
|
||||
|
||||
my (undef, $sql_mode_string) = $self->{dbh}->selectrow_array("show variables like 'sql_mode'");
|
||||
|
||||
# Need to account for occurrence at
|
||||
# Need to account for occurrence at
|
||||
# beginning, middle or end of comma separated string
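# Example (sample value invented): with
#   sql_mode = 'STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE'
# the anchored pattern matches the element 'NO_ZERO_DATE' but not the bare
# substring 'ZERO_DATE', since a mode only counts when it is delimited by a
# comma or by the start/end of the string.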
|
||||
return $sql_mode_string =~ /(?:,|^)$mode(?:,|$)/;
|
||||
|
||||
}
|
||||
|
||||
# Sub: get_modes
|
||||
# get current set of sql modes
|
||||
# get current set of sql modes
|
||||
#
|
||||
# Required Arguments:
|
||||
# none
|
||||
@@ -177,13 +177,13 @@ sub get_modes {
|
||||
}
|
||||
|
||||
# Sub: get_modes_string
|
||||
# get current set of sql modes as string
|
||||
# get current set of sql modes as string
|
||||
#
|
||||
# Required Arguments:
|
||||
# none
|
||||
#
|
||||
# Returns:
|
||||
# sql_modes as a string (coma separated values)
|
||||
# sql_modes as a string (coma separated values)
|
||||
sub get_modes_string {
|
||||
my ( $self ) = @_;
|
||||
|
||||
@@ -192,8 +192,8 @@ sub get_modes_string {
|
||||
return $sql_mode_string;
|
||||
}
|
||||
|
||||
# Sub: restore_original_modes
|
||||
# resets sql_mode to the state it was when object was created
|
||||
# Sub: restore_original_modes
|
||||
# resets sql_mode to the state it was when object was created
|
||||
#
|
||||
# Required Arguments:
|
||||
# none
|
||||
|
@@ -145,7 +145,7 @@ sub parse_event {
|
||||
# If $timestamp is not within the current interval, then we need to save
|
||||
# everything for later, compute stats for the rest of this interval, and
|
||||
# return an event. The next time we are called, we'll begin the next
|
||||
# interval.
|
||||
# interval.
|
||||
if ( $t_start > $self->{t_start} ) {
|
||||
PTDEBUG && _d("Timestamp doesn't belong to this interval");
|
||||
# We need to compute how much time is left in this interval, and add
|
||||
|
@@ -326,7 +326,7 @@ sub make_row_checksum {
|
||||
my $query;
|
||||
if ( !$args{no_cols} ) {
|
||||
$query = join(', ',
|
||||
map {
|
||||
map {
|
||||
my $col = $_;
|
||||
if ( $col =~ m/\+ 0/ ) {
|
||||
# Alias col name back to itself else its name becomes
|
||||
|
@@ -25,7 +25,7 @@
|
||||
# types. Any numeric column type that MySQL can do positional comparisons
|
||||
# (<, <=, >, >=) on works. Chunking on character data is not supported yet
|
||||
# (but see <issue 568 at http://code.google.com/p/maatkit/issues/detail?id=568>).
|
||||
#
|
||||
#
|
||||
# Usually chunks range over all rows in a table but sometimes they only
|
||||
# range over a subset of rows if an optional where arg is passed to various
|
||||
# subs. In either case a chunk is like "`col` >= 5 AND `col` < 10". If
|
||||
@@ -133,10 +133,10 @@ sub find_chunk_columns {
|
||||
PTDEBUG && _d('Possible chunk indexes in order:',
|
||||
join(', ', map { $_->{name} } @possible_indexes));
|
||||
|
||||
# Build list of candidate chunk columns.
|
||||
# Build list of candidate chunk columns.
|
||||
my $can_chunk_exact = 0;
|
||||
my @candidate_cols;
|
||||
foreach my $index ( @possible_indexes ) {
|
||||
foreach my $index ( @possible_indexes ) {
|
||||
my $col = $index->{cols}->[0];
|
||||
|
||||
# Accept only integer or real number type columns or character columns.
|
||||
@@ -373,7 +373,7 @@ sub _chunk_numeric {
|
||||
my $col_type = $args{tbl_struct}->{type_for}->{$args{chunk_col}};
|
||||
|
||||
# Convert the given MySQL values to (Perl) numbers using some MySQL function.
|
||||
# E.g.: SELECT TIME_TO_SEC('12:34') == 45240.
|
||||
# E.g.: SELECT TIME_TO_SEC('12:34') == 45240.
|
||||
my $range_func;
|
||||
if ( $col_type =~ m/(?:int|year|float|double|decimal)$/ ) {
|
||||
$range_func = 'range_num';
|
||||
@@ -650,7 +650,7 @@ sub _chunk_char {
|
||||
# than not so we use the minimum number of characters to express a chunk
|
||||
# size.
|
||||
$sql = "SELECT MAX(LENGTH($qchunk_col)) FROM $db_tbl "
|
||||
. ($args{where} ? "WHERE $args{where} " : "")
|
||||
. ($args{where} ? "WHERE $args{where} " : "")
|
||||
. "ORDER BY $qchunk_col";
|
||||
PTDEBUG && _d($dbh, $sql);
|
||||
$row = $dbh->selectrow_arrayref($sql);
|
||||
@@ -728,7 +728,7 @@ sub get_first_chunkable_column {
|
||||
die "I need a $arg argument" unless $args{$arg};
|
||||
}
|
||||
|
||||
# First auto-detected chunk col/index. If any combination of preferred
|
||||
# First auto-detected chunk col/index. If any combination of preferred
|
||||
# chunk col or index are specified and are sane, they will overwrite
|
||||
# these defaults. Else, these defaults will be returned.
|
||||
my ($exact, @cols) = $self->find_chunk_columns(%args);
|
||||
@@ -858,7 +858,7 @@ sub size_to_rows {
|
||||
# tries - Fetch up to this many rows to find a valid value
|
||||
#
|
||||
# Returns:
|
||||
# Array: min row value, max row value, rows in range
|
||||
# Array: min row value, max row value, rows in range
|
||||
sub get_range_statistics {
|
||||
my ( $self, %args ) = @_;
|
||||
my @required_args = qw(dbh db tbl chunk_col tbl_struct);
|
||||
@@ -872,7 +872,7 @@ sub get_range_statistics {
|
||||
my $col_type = $args{tbl_struct}->{type_for}->{$col};
|
||||
my $col_is_numeric = $args{tbl_struct}->{is_numeric}->{$col};
|
||||
|
||||
# Quote these once so we don't have to do it again.
|
||||
# Quote these once so we don't have to do it again.
|
||||
my $db_tbl = $q->quote($db, $tbl);
|
||||
$col = $q->quote($col);
|
||||
|
||||
@@ -924,7 +924,7 @@ sub get_range_statistics {
|
||||
|
||||
# Sub: inject_chunks
|
||||
# Create a SQL statement from a query prototype by filling in placeholders.
|
||||
#
|
||||
#
|
||||
# Parameters:
|
||||
# %args - Arguments
|
||||
#
|
||||
@@ -1398,7 +1398,7 @@ sub base_count {
|
||||
|
||||
my @base_powers;
|
||||
for my $power ( 0..$highest_power ) {
|
||||
push @base_powers, ($base**$power) || 1;
|
||||
push @base_powers, ($base**$power) || 1;
|
||||
}
|
||||
|
||||
my @base_multiples;
|
||||
|
@@ -73,7 +73,7 @@ sub generate_asc_stmt {
|
||||
# a nonexistent index.
|
||||
die "Index '$index' does not exist in table"
|
||||
unless exists $tbl_struct->{keys}->{$index};
|
||||
PTDEBUG && _d('Will ascend index', $index);
|
||||
PTDEBUG && _d('Will ascend index', $index);
|
||||
|
||||
# These are the columns we'll ascend.
|
||||
my @asc_cols = @{$tbl_struct->{keys}->{$index}->{cols}};
|
||||
|
@@ -111,7 +111,7 @@ sub get_create_table {
|
||||
# Table: city
|
||||
# Create Table: CREATE TABLE `city` (
|
||||
# `city_id` smallint(5) unsigned NOT NULL AUTO_INCREMENT,
|
||||
# ...
|
||||
# ...
|
||||
# We want the second column.
|
||||
my ($key) = grep { m/create (?:table|view)/i } keys %$href;
|
||||
if ( !$key ) {
|
||||
@@ -221,9 +221,9 @@ sub parse {
|
||||
sub remove_quoted_text {
|
||||
my ($string) = @_;
|
||||
$string =~ s/\\['"]//g;
|
||||
$string =~ s/`[^`]*?`//g;
|
||||
$string =~ s/"[^"]*?"//g;
|
||||
$string =~ s/'[^']*?'//g;
|
||||
$string =~ s/`[^`]*?`//g;
|
||||
$string =~ s/"[^"]*?"//g;
|
||||
$string =~ s/'[^']*?'//g;
|
||||
return $string;
|
||||
}
|
||||
|
||||
|
@@ -353,7 +353,7 @@ sub __get_explain_index {
|
||||
PTDEBUG && _d($EVAL_ERROR);
|
||||
return;
|
||||
}
|
||||
PTDEBUG && _d('EXPLAIN key:', $explain->[0]->{key});
|
||||
PTDEBUG && _d('EXPLAIN key:', $explain->[0]->{key});
|
||||
return $explain->[0]->{key};
|
||||
}
|
||||
|
||||
|
@@ -183,7 +183,7 @@ sub sync_table {
|
||||
# checksum algo and hash func can't be used on both.
|
||||
die "Failed to make checksum queries: $EVAL_ERROR";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# ########################################################################
|
||||
# Plugin is ready, return now if this is a dry run.
|
||||
|
@@ -241,7 +241,7 @@ sub _get_tables_used_from_query_struct {
|
||||
PTDEBUG && _d("Using EXPLAIN EXTENDED to disambiguate columns");
|
||||
if ( $self->_reparse_query(%args) ) {
|
||||
return $self->_get_tables_used_from_query_struct(%args);
|
||||
}
|
||||
}
|
||||
PTDEBUG && _d('Failed to disambiguate columns');
|
||||
}
|
||||
}
|
||||
@@ -333,7 +333,7 @@ sub _get_tables_used_from_query_struct {
|
||||
PTDEBUG && _d("Using EXPLAIN EXTENDED to disambiguate columns");
|
||||
if ( $self->_reparse_query(%args) ) {
|
||||
return $self->_get_tables_used_from_query_struct(%args);
|
||||
}
|
||||
}
|
||||
PTDEBUG && _d('Failed to disambiguate columns');
|
||||
}
|
||||
|
||||
@@ -390,8 +390,8 @@ sub _get_tables_used_from_query_struct {
|
||||
"to disambiguate columns");
|
||||
if ( $self->_reparse_query(%args) ) {
|
||||
return $self->_get_tables_used_from_query_struct(%args);
|
||||
}
|
||||
PTDEBUG && _d('Failed to disambiguate columns');
|
||||
}
|
||||
PTDEBUG && _d('Failed to disambiguate columns');
|
||||
}
|
||||
|
||||
foreach my $joined_table ( @{$on_tables->{joined_tables}} ) {
|
||||
@@ -799,7 +799,7 @@ sub _qualify_table_name {
|
||||
}
|
||||
|
||||
# Last resort: use default db if it's given.
|
||||
if ( !$db_tbl && $args{default_db} ) {
|
||||
if ( !$db_tbl && $args{default_db} ) {
|
||||
$db_tbl = "$args{default_db}.$tbl";
|
||||
}
|
||||
|
||||
@@ -846,7 +846,7 @@ sub _explain_query {
|
||||
}
|
||||
|
||||
$self->{db_version} ||= VersionParser->new($dbh);
|
||||
if ( $self->{db_version} < '5.7.3' ) {
|
||||
if ( $self->{db_version} < '5.7.3' ) {
|
||||
$sql = "EXPLAIN EXTENDED $query";
|
||||
}
|
||||
else {
|
||||
|
@@ -92,7 +92,7 @@ sub parse_event {
|
||||
$raw_packet = "20$raw_packet" unless $raw_packet =~ m/\A20/;
|
||||
|
||||
# Remove special headers (e.g. vlan) before the IPv4 header.
|
||||
# The vast majority of IPv4 headers begin with 4508 (or 4500).
|
||||
# The vast majority of IPv4 headers begin with 4508 (or 4500).
|
||||
# http://code.google.com/p/maatkit/issues/detail?id=906
|
||||
$raw_packet =~ s/0x0000:.+?(450.) /0x0000: $1 /;
|
||||
|
||||
@@ -121,9 +121,9 @@ sub _parse_packet {
|
||||
# Change ports from service name to number.
|
||||
$src_port = $self->port_number($src_port);
|
||||
$dst_port = $self->port_number($dst_port);
|
||||
|
||||
|
||||
my $hex = qr/[0-9a-f]/;
|
||||
(my $data = join('', $packet =~ m/\s+0x$hex+:\s((?:\s$hex{2,4})+)/go)) =~ s/\s+//g;
|
||||
(my $data = join('', $packet =~ m/\s+0x$hex+:\s((?:\s$hex{2,4})+)/go)) =~ s/\s+//g;
|
||||
|
||||
# Find length information in the IPv4 header. Typically 5 32-bit
|
||||
# words. See http://en.wikipedia.org/wiki/IPv4#Header
|
||||
|
@@ -22,13 +22,13 @@
|
||||
# TextResultSetParser converts a text result set to a data struct like
|
||||
# DBI::selectall_arrayref(). Text result sets are like what SHOW PROCESSLIST
|
||||
# and EXPLAIN print, like:
|
||||
#
|
||||
#
|
||||
# +----+------+
|
||||
# | Id | User |
|
||||
# +----+------+
|
||||
# | 1 | bob |
|
||||
# +----+------+
|
||||
#
|
||||
#
|
||||
# That converts to:
|
||||
# (start code)
|
||||
# [
|
||||
|
@@ -349,7 +349,7 @@ sub value_to_json {
|
||||
|
||||
my $b_obj = B::svref_2object(\$value); # for round trip problem
|
||||
my $flags = $b_obj->FLAGS;
|
||||
return $value # as is
|
||||
return $value # as is
|
||||
if $flags & ( B::SVp_IOK | B::SVp_NOK ) and !( $flags & B::SVp_POK ); # SvTYPE is IV or NV?
|
||||
|
||||
my $type = ref($value);
|
||||
|
@@ -220,7 +220,7 @@ sub report_unreported_classes {
|
||||
class => $class,
|
||||
reasons => ["$reason, but hasn't been reported yet"],
|
||||
);
|
||||
$class->{reported} = 1;
|
||||
$class->{reported} = 1;
|
||||
};
|
||||
if ( $EVAL_ERROR ) {
|
||||
$success = 1;
|
||||
@@ -268,7 +268,7 @@ sub report_if_ready {
|
||||
class => $class,
|
||||
reasons => \@report_reasons,
|
||||
);
|
||||
$class->{reported} = 1;
|
||||
$class->{reported} = 1;
|
||||
}
|
||||
|
||||
return;
|
||||
@@ -483,13 +483,13 @@ sub _format_warnings {
|
||||
my $warn1 = $warn->[1];
|
||||
my $warn2 = $warn->[2];
|
||||
my $host1_warn
|
||||
= $warn1 ? sprintf $warning_format,
|
||||
= $warn1 ? sprintf $warning_format,
|
||||
($warn1->{Code} || $warn1->{code} || '?'),
|
||||
($warn1->{Level} || $warn1->{level} || '?'),
|
||||
($warn1->{Message} || $warn1->{message} || '?')
|
||||
: "No warning $code\n";
|
||||
my $host2_warn
|
||||
= $warn2 ? sprintf $warning_format,
|
||||
= $warn2 ? sprintf $warning_format,
|
||||
($warn2->{Code} || $warn2->{code} || '?'),
|
||||
($warn2->{Level} || $warn2->{level} || '?'),
|
||||
($warn2->{Message} || $warn2->{message} || '?')
|
||||
|
@@ -367,7 +367,7 @@ sub get_rules {
|
||||
return _var_gt($args{variables}->{relay_log_space_limit}, 0);
|
||||
},
|
||||
},
|
||||
|
||||
|
||||
{
|
||||
id => 'slave_net_timeout',
|
||||
code => sub {
|
||||
|
@@ -73,7 +73,7 @@ my @vc_dirs = (
|
||||
}
|
||||
PTDEBUG && _d('Version check file', $file, 'in', $ENV{PWD});
|
||||
return $file; # in the CWD
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Return time limit between checks.
|
||||
@@ -112,11 +112,11 @@ sub version_check {
|
||||
PTDEBUG && _d('FindBin::Bin:', $FindBin::Bin);
|
||||
if ( !$args{force} ) {
|
||||
if ( $FindBin::Bin
|
||||
&& (-d "$FindBin::Bin/../.bzr" ||
|
||||
&& (-d "$FindBin::Bin/../.bzr" ||
|
||||
-d "$FindBin::Bin/../../.bzr" ||
|
||||
-d "$FindBin::Bin/../.git" ||
|
||||
-d "$FindBin::Bin/../../.git"
|
||||
)
|
||||
-d "$FindBin::Bin/../.git" ||
|
||||
-d "$FindBin::Bin/../../.git"
|
||||
)
|
||||
) {
|
||||
PTDEBUG && _d("$FindBin::Bin/../.bzr disables --version-check");
|
||||
return;
|
||||
@@ -145,7 +145,7 @@ sub version_check {
|
||||
return unless @$instances_to_check;
|
||||
|
||||
# Skip Version Check altogether if SSL not available
|
||||
my $protocol = 'https';
|
||||
my $protocol = 'https';
|
||||
eval { require IO::Socket::SSL; };
|
||||
if ( $EVAL_ERROR ) {
|
||||
PTDEBUG && _d($EVAL_ERROR);
|
||||
@@ -330,7 +330,7 @@ sub get_instance_id {
|
||||
}
|
||||
|
||||
|
||||
# This function has been implemented solely to be able to count individual
|
||||
# This function has been implemented solely to be able to count individual
|
||||
# Toolkit users for statistics. It uses a random UUID, no client info is
|
||||
# being gathered nor stored
|
||||
sub get_uuid {
|
||||
@@ -354,7 +354,7 @@ sub get_uuid {
|
||||
}
|
||||
|
||||
return $uuid;
|
||||
}
|
||||
}
|
||||
|
||||
sub _generate_uuid {
|
||||
return sprintf+($}="%04x")."$}-$}-$}-$}-".$}x3,map rand 65537,0..7;
|
||||
@@ -421,7 +421,7 @@ sub pingback {
|
||||
);
|
||||
die "Failed to parse server requested programs: $response->{content}"
|
||||
if !scalar keys %$items;
|
||||
|
||||
|
||||
# Get the versions for those items in another hashref also keyed on
|
||||
# the items like:
|
||||
# "MySQL" => "MySQL Community Server 5.1.49-log",
|
||||
@@ -706,12 +706,12 @@ sub get_from_mysql {
|
||||
return;
|
||||
}
|
||||
|
||||
# Only allow version variables to be reported
|
||||
# Only allow version variables to be reported
|
||||
# So in case of MITM attack, we don't report sensitive data
|
||||
if ($item->{item} eq 'MySQL' && $item->{type} eq 'mysql_variable') {
|
||||
@{$item->{vars}} = grep { $_ eq 'version' || $_ eq 'version_comment' } @{$item->{vars}};
|
||||
}
|
||||
|
||||
|
||||
|
||||
my @versions;
|
||||
my %version_for;
|
||||
|
@@ -32,30 +32,30 @@ use constant PTDEBUG => $ENV{PTDEBUG} || 0;
|
||||
sub cmp {
|
||||
my ($v1, $v2) = @_;
|
||||
|
||||
# Remove all but numbers and dots.
|
||||
# Remove all but numbers and dots.
|
||||
# Assume simple 1.2.3 style
|
||||
$v1 =~ s/[^\d\.]//;
|
||||
$v2 =~ s/[^\d\.]//;
|
||||
|
||||
my @a = ( $v1 =~ /(\d+)\.?/g );
|
||||
my @b = ( $v2 =~ /(\d+)\.?/g );
|
||||
my @a = ( $v1 =~ /(\d+)\.?/g );
|
||||
my @b = ( $v2 =~ /(\d+)\.?/g );
|
||||
foreach my $n1 (@a) {
|
||||
$n1 += 0; #convert to number
|
||||
if (!@b) {
|
||||
# b ran out of digits, a is larger
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
my $n2 = shift @b;
|
||||
$n2 += 0; # convert to number
|
||||
if ($n1 == $n2) {
|
||||
# still tied?, fetch next
|
||||
# still tied?, fetch next
|
||||
next;
|
||||
}
|
||||
else {
|
||||
# difference! return result
|
||||
return $n1 <=> $n2;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
# b still has digits? it's larger, else it's a tie
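# Example (versions invented): comparing '5.7.3' with '5.10.1' goes
# 5<=>5, then 7<=>10, so '5.7.3' sorts lower (-1); a plain string
# comparison would wrongly order '5.10.1' before '5.7.3'.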
|
||||
return @b ? -1 : 0;
|
||||
}
|
||||
|
@@ -50,11 +50,11 @@ _lsof() {
|
||||
# they are somewhere
|
||||
|
||||
# TODO:
|
||||
# we just need to redirect STDERR when we execute
|
||||
# "which" and check it. Some shells are really weird this way. We
|
||||
# can't check "which"'s exit status because it will be nonzero if
|
||||
# we just need to redirect STDERR when we execute
|
||||
# "which" and check it. Some shells are really weird this way. We
|
||||
# can't check "which"'s exit status because it will be nonzero if
|
||||
# the sought-for command doesn't exist.
|
||||
#
|
||||
#
|
||||
_which() {
|
||||
# which on CentOS is aliased to a cmd that prints extra stuff.
|
||||
# Also, if the cmd isn't found, a msg is printed to stderr.
|
||||
|
@@ -50,7 +50,7 @@ collect() {
|
||||
local p="$2" # prefix for each result file
|
||||
|
||||
local cnt=$(($OPT_RUN_TIME / $OPT_SLEEP_COLLECT))
|
||||
|
||||
|
||||
if [ ! "$OPT_SYSTEM_ONLY" ]; then
|
||||
local mysqld_pid=""
|
||||
local mysql_version=""
|
||||
@@ -68,10 +68,10 @@ collect() {
|
||||
fi
|
||||
|
||||
# Grab a few general things first. Background all of these so we can start
|
||||
# them all up as quickly as possible.
|
||||
if [ ! "$OPT_MYSQL_ONLY" ]; then
|
||||
# them all up as quickly as possible.
|
||||
if [ ! "$OPT_MYSQL_ONLY" ]; then
|
||||
collect_system_data
|
||||
fi
|
||||
fi
|
||||
|
||||
# This loop gathers data for the rest of the duration, and defines the time
|
||||
# of the whole job.
|
||||
@@ -172,7 +172,7 @@ collect_mysql_data_one() {
|
||||
$CMD_MYSQLADMIN $EXT_ARGV
|
||||
else
|
||||
log "Could not find the MySQL error log"
|
||||
fi
|
||||
fi
|
||||
# Get a sample of these right away, so we can get these without interaction
|
||||
# with the other commands we're about to run.
|
||||
if [ "${mysql_version}" '>' "5.1" ]; then
|
||||
@@ -255,7 +255,7 @@ collect_system_data() {
|
||||
if [ "$CMD_DMESG" ]; then
|
||||
local UPTIME=`cat /proc/uptime | awk '{ print $1 }'`
|
||||
local START_TIME=$(echo "$UPTIME 60" | awk '{print ($1 - $2)}')
|
||||
$CMD_DMESG | perl -ne 'm/\[\s*(\d+)\./; if ($1 > '${START_TIME}') { print }' >> "$d/$p-dmesg" &
|
||||
$CMD_DMESG | perl -ne 'm/\[\s*(\d+)\./; if ($1 > '${START_TIME}') { print }' >> "$d/$p-dmesg" &
|
||||
fi
|
||||
|
||||
if [ "$CMD_VMSTAT" ]; then
|
||||
@@ -288,7 +288,7 @@ collect_mysql_data_loop() {
|
||||
(echo $ts; ps_prepared_statements "$d/prepared_statements.isrunnning") >> "$d/$p-prepared-statements" &
|
||||
fi
|
||||
|
||||
slave_status "$d/$p-slave-status" "${mysql_version}"
|
||||
slave_status "$d/$p-slave-status" "${mysql_version}"
|
||||
}
|
||||
|
||||
collect_system_data_loop() {
|
||||
@@ -473,7 +473,7 @@ lock_waits() {
|
||||
|
||||
rm "$flag_file"
|
||||
fi
|
||||
}
|
||||
}
|
||||
|
||||
transactions() {
|
||||
$CMD_MYSQL $EXT_ARGV -e "SELECT SQL_NO_CACHE * FROM INFORMATION_SCHEMA.INNODB_TRX ORDER BY trx_id\G"
|
||||
@@ -532,8 +532,8 @@ rocksdb_status() {
|
||||
}
|
||||
|
||||
ps_locks_transactions() {
|
||||
local outfile=$1
|
||||
|
||||
local outfile=$1
|
||||
|
||||
$CMD_MYSQL $EXT_ARGV -e 'select @@performance_schema' | grep "1" &>/dev/null
|
||||
|
||||
if [ $? -eq 0 ]; then
|
||||
@@ -606,7 +606,7 @@ slave_status() {
|
||||
}
|
||||
|
||||
collect_mysql_variables() {
|
||||
local outfile=$1
|
||||
local outfile=$1
|
||||
|
||||
local sql="SHOW GLOBAL VARIABLES"
|
||||
echo -e "\n$sql\n" >> $outfile
|
||||
@@ -615,11 +615,11 @@ collect_mysql_variables() {
|
||||
sql="select * from performance_schema.variables_by_thread order by thread_id, variable_name;"
|
||||
echo -e "\n$sql\n" >> $outfile
|
||||
$CMD_MYSQL $EXT_ARGV -e "$sql" >> $outfile
|
||||
|
||||
|
||||
sql="select * from performance_schema.user_variables_by_thread order by thread_id, variable_name;"
|
||||
echo -e "\n$sql\n" >> $outfile
|
||||
$CMD_MYSQL $EXT_ARGV -e "$sql" >> $outfile
|
||||
|
||||
|
||||
sql="select * from performance_schema.status_by_thread order by thread_id, variable_name; "
|
||||
echo -e "\n$sql\n" >> $outfile
|
||||
$CMD_MYSQL $EXT_ARGV -e "$sql" >> $outfile
|
||||
|
@@ -211,7 +211,7 @@ collect_internal_vars () {
|
||||
if [ $GENERAL_JEMALLOC_STATUS -eq 1 ]; then
|
||||
JEMALLOC_LOCATION=$(find /usr/lib64/ /usr/lib/x86_64-linux-gnu /usr/lib -name "libjemalloc.*" 2>/dev/null | head -n 1)
|
||||
echo "pt-summary-internal-jemalloc_location ${JEMALLOC_LOCATION}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
collect_keyring_plugins() {
|
||||
@@ -224,7 +224,7 @@ collect_encrypted_tables() {
|
||||
|
||||
collect_encrypted_tablespaces() {
|
||||
local version="$1"
|
||||
# I_S.INNODB_[SYS_]TABLESPACES has a "flag" field. Encrypted tablespace has bit 14 set. You can check it with "flag & 8192".
|
||||
# I_S.INNODB_[SYS_]TABLESPACES has a "flag" field. Encrypted tablespace has bit 14 set. You can check it with "flag & 8192".
|
||||
# And seems like MySQL is capable of bitwise operations. https://dev.mysql.com/doc/refman/5.7/en/bit-functions.html
|
||||
if [ "$version" '<' "8.0" ]; then
|
||||
$CMD_MYSQL $EXT_ARGV --table -ss -e "SELECT SPACE, NAME, SPACE_TYPE from INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES where FLAG&8192 = 8192;"
|
||||
@@ -294,7 +294,7 @@ collect_mysql_info () {
|
||||
collect_mysql_slave_status > "$dir/mysql-slave"
|
||||
collect_mysql_innodb_status > "$dir/innodb-status"
|
||||
collect_mysql_ndb_status > "$dir/ndb-status"
|
||||
collect_mysql_processlist > "$dir/mysql-processlist"
|
||||
collect_mysql_processlist > "$dir/mysql-processlist"
|
||||
collect_mysql_users > "$dir/mysql-users"
|
||||
collect_mysql_roles > "$dir/mysql-roles"
|
||||
collect_keyring_plugins > "$dir/keyring-plugins"
|
||||
|
@@ -136,7 +136,7 @@ collect_system_data () { local PTFUNCNAME=collect_system_data;
|
||||
|
||||
# Fusion-io cards
|
||||
fio_status_minus_a "$data_dir/fusion-io_card"
|
||||
|
||||
|
||||
# Clean the data directory, don't leave empty files
|
||||
for file in $data_dir/*; do
|
||||
# The vmstat file gets special treatmeant, see above.
|
||||
@@ -175,7 +175,7 @@ fio_status_minus_a () {
|
||||
|
||||
print "${adapter}_general $adapter_general";
|
||||
print "${adapter}_modules @connected_modules";
|
||||
|
||||
|
||||
for my $module (@connected_modules) {
|
||||
my ($rest, $attached, $general, $firmware, $temperature, $media_status) = /(
|
||||
^ \s* $module \s+ (Attached[^\n]+) \n
|
||||
@@ -196,7 +196,7 @@ fio_status_minus_a () {
|
||||
} while <>;
|
||||
|
||||
print "adapters @adapters\n";
|
||||
|
||||
|
||||
exit;
|
||||
EOP
|
||||
|
||||
@@ -226,7 +226,7 @@ linux_exclusive_collection () { local PTFUNCNAME=linux_exclusive_collection;
|
||||
echo "dirtystatus $(awk '/vm.dirty_bytes/{print $3}' "$data_dir/sysctl"), $(awk '/vm.dirty_background_bytes/{print $3}' "$data_dir/sysctl")" >> "$data_dir/summary"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
if [ -e "$data_dir/numactl" ]; then
|
||||
echo "numa-available $(awk '/available/{print $2}' "$data_dir/numactl")" >> "$data_dir/summary"
|
||||
echo "numa-policy $(awk '/policy/{print $2}' "$data_dir/numactl")" >> "$data_dir/summary"
|
||||
@@ -602,7 +602,7 @@ processor_info () { local PTFUNCNAME=processor_info;
|
||||
cat /proc/cpuinfo > "$data_dir/proc_cpuinfo_copy" 2>/dev/null
|
||||
elif [ "${platform}" = "SunOS" ]; then
|
||||
$CMD_PSRINFO -v > "$data_dir/psrinfo_minus_v"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# ########################################################################
|
||||
@@ -631,7 +631,7 @@ proprietary_raid_controller () { local PTFUNCNAME=proprietary_raid_controller;
|
||||
echo "internal::raid_opt 2" >> "$variable_file"
|
||||
fi
|
||||
elif [ "${controller}" = "LSI Logic MegaRAID SAS" ]; then
|
||||
if [ -z "$CMD_MEGACLI64" ]; then
|
||||
if [ -z "$CMD_MEGACLI64" ]; then
|
||||
notfound="your package repository or the manufacturer's website"
|
||||
else
|
||||
echo "internal::raid_opt 3" >> "$variable_file"
|
||||
|
@@ -44,21 +44,21 @@ mysql_options() {
|
||||
if [ -n "$OPT_ASK_PASS" ]; then
|
||||
stty -echo
|
||||
>&2 printf "Enter MySQL password: "
|
||||
read GIVEN_PASS
|
||||
read GIVEN_PASS
|
||||
stty echo
|
||||
printf "\n"
|
||||
MYSQL_ARGS="$MYSQL_ARGS --password=$GIVEN_PASS"
|
||||
elif [ -n "$OPT_PASSWORD" ]; then
|
||||
MYSQL_ARGS="$MYSQL_ARGS --password=$OPT_PASSWORD"
|
||||
fi
|
||||
|
||||
|
||||
echo $MYSQL_ARGS
|
||||
}
|
||||
|
||||
# This basically makes sure that --defaults-file comes first
|
||||
arrange_mysql_options() {
|
||||
local opts="$1"
|
||||
|
||||
|
||||
local rearranged=""
|
||||
for opt in $opts; do
|
||||
if [ "$(echo $opt | awk -F= '{print $1}')" = "--defaults-file" ]; then
|
||||
@@ -67,7 +67,7 @@ arrange_mysql_options() {
|
||||
rearranged="$rearranged $opt"
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
echo "$rearranged"
|
||||
}
|
||||
|
||||
|
@@ -239,7 +239,7 @@ _parse_pod() {
|
||||
|
||||
# Parse the program options (po) from the POD. Each option has
|
||||
# a spec file like:
|
||||
# $ cat po/string-opt2
|
||||
# $ cat po/string-opt2
|
||||
# long=string-opt2
|
||||
# type=string
|
||||
# default=foo
|
||||
@@ -313,7 +313,7 @@ _eval_po() {
|
||||
*)
|
||||
echo "Invalid attribute in $opt_spec: $line" >&2
|
||||
exit 1
|
||||
esac
|
||||
esac
|
||||
done < "$opt_spec"
|
||||
|
||||
if [ -z "$opt" ]; then
|
||||
@@ -504,7 +504,7 @@ _parse_command_line() {
|
||||
if [ "$val" ]; then
|
||||
option_error "Option $real_opt does not take a value"
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
if [ "$opt_is_negated" ]; then
|
||||
val=""
|
||||
else
|
||||
|
@@ -147,7 +147,7 @@ parse_mysqld_instances () {
|
||||
defaults_file="$(echo "${word}" | cut -d= -f2)"
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
if [ -n "${defaults_file:-""}" -a -r "${defaults_file:-""}" ]; then
|
||||
socket="${socket:-"$(grep "^socket\>" "$defaults_file" | tail -n1 | cut -d= -f2 | sed 's/^[ \t]*//;s/[ \t]*$//')"}"
|
||||
port="${port:-"$(grep "^port\>" "$defaults_file" | tail -n1 | cut -d= -f2 | sed 's/^[ \t]*//;s/[ \t]*$//')"}"
|
||||
@@ -162,7 +162,7 @@ parse_mysqld_instances () {
|
||||
oom="?"
|
||||
fi
|
||||
printf " %5s %-26s %-4s %-3s %s\n" "${port}" "${datadir}" "${nice:-"?"}" "${oom:-"?"}" "${socket}"
|
||||
|
||||
|
||||
# Need to unset all of them in case the next process uses --defaults-file
|
||||
defaults_file=""
|
||||
socket=""
|
||||
@@ -317,7 +317,7 @@ summarize_processlist () {
|
||||
}
|
||||
\$1 == \"Time:\" {
|
||||
t = \$2;
|
||||
if ( t == \"NULL\" ) {
|
||||
if ( t == \"NULL\" ) {
|
||||
t = 0;
|
||||
}
|
||||
}
|
||||
@@ -356,15 +356,15 @@ pretty_print_cnf_file () {
|
||||
|
||||
perl -n -l -e '
|
||||
my $line = $_;
|
||||
if ( $line =~ /^\s*[a-zA-Z[]/ ) {
|
||||
if ( $line=~/\s*(.*?)\s*=\s*(.*)\s*$/ ) {
|
||||
printf("%-35s = %s\n", $1, $2)
|
||||
}
|
||||
elsif ( $line =~ /\s*\[/ ) {
|
||||
print "\n$line"
|
||||
if ( $line =~ /^\s*[a-zA-Z[]/ ) {
|
||||
if ( $line=~/\s*(.*?)\s*=\s*(.*)\s*$/ ) {
|
||||
printf("%-35s = %s\n", $1, $2)
|
||||
}
|
||||
elsif ( $line =~ /\s*\[/ ) {
|
||||
print "\n$line"
|
||||
} else {
|
||||
print $line
|
||||
}
|
||||
}
|
||||
}' "$file"
|
||||
|
||||
while read line; do
|
||||
@@ -583,7 +583,7 @@ format_ndb_status() {
|
||||
local file=$1
|
||||
|
||||
[ -e "$file" ] || return
|
||||
# We could use "& \n" but that does not seem to work on bsd sed.
|
||||
# We could use "& \n" but that does not seem to work on bsd sed.
|
||||
egrep '^[ \t]*Name:|[ \t]*Status:' $file|sed 's/^[ \t]*//g'|while read line; do echo $line; echo $line | grep '^Status:'>/dev/null && echo ; done
|
||||
}
|
||||
|
||||
@@ -591,7 +591,7 @@ format_keyring_plugins() {
|
||||
local keyring_plugins="$1"
|
||||
local encrypted_tables="$2"
|
||||
|
||||
if [ -z "$keyring_plugins" ]; then
|
||||
if [ -z "$keyring_plugins" ]; then
|
||||
echo "No keyring plugins found"
|
||||
if [ ! -z "$encrypted_tables" ]; then
|
||||
echo "Warning! There are encrypted tables but keyring plugins are not loaded"
|
||||
@@ -948,7 +948,7 @@ section_percona_server_features () {
|
||||
# Renamed to innodb_buffer_pool_restore_at_startup in 5.5.10-20.1
|
||||
name_val "Fast Server Restarts" \
|
||||
"$(feat_on_renamed "$file" innodb_auto_lru_dump innodb_buffer_pool_restore_at_startup)"
|
||||
|
||||
|
||||
name_val "Enhanced Logging" \
|
||||
"$(feat_on "$file" log_slow_verbosity ne microtime)"
|
||||
name_val "Replica Perf Logging" \
|
||||
@@ -970,7 +970,7 @@ section_percona_server_features () {
|
||||
fi
|
||||
fi
|
||||
name_val "Smooth Flushing" "$smooth_flushing"
|
||||
|
||||
|
||||
name_val "HandlerSocket NoSQL" \
|
||||
"$(feat_on "$file" handlersocket_port)"
|
||||
name_val "Fast Hash UDFs" \
|
||||
@@ -1133,7 +1133,7 @@ _semi_sync_stats_for () {
|
||||
trace_extra="Unknown setting"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
name_val "${target} semisync status" "${semisync_status}"
|
||||
name_val "${target} trace level" "${semisync_trace}, ${trace_extra}"
|
||||
|
||||
@@ -1249,10 +1249,10 @@ section_percona_xtradb_cluster () {
|
||||
|
||||
name_val "SST Method" "$(get_var "wsrep_sst_method" "$mysql_var")"
|
||||
name_val "Slave Threads" "$(get_var "wsrep_slave_threads" "$mysql_var")"
|
||||
|
||||
|
||||
name_val "Ignore Split Brain" "$( parse_wsrep_provider_options "pc.ignore_sb" "$mysql_var" )"
|
||||
name_val "Ignore Quorum" "$( parse_wsrep_provider_options "pc.ignore_quorum" "$mysql_var" )"
|
||||
|
||||
|
||||
name_val "gcache Size" "$( parse_wsrep_provider_options "gcache.size" "$mysql_var" )"
|
||||
name_val "gcache Directory" "$( parse_wsrep_provider_options "gcache.dir" "$mysql_var" )"
|
||||
name_val "gcache Name" "$( parse_wsrep_provider_options "gcache.name" "$mysql_var" )"
|
||||
@@ -1275,13 +1275,13 @@ report_jemalloc_enabled() {
|
||||
local instances_file="$1"
|
||||
local variables_file="$2"
|
||||
local GENERAL_JEMALLOC_STATUS=0
|
||||
|
||||
|
||||
for pid in $(grep '/mysqld ' "$instances_file" | awk '{print $1;}'); do
|
||||
local jemalloc_status="$(get_var "pt-summary-internal-jemalloc_enabled_for_pid_${pid}" "${variables_file}")"
|
||||
if [ -z $jemalloc_status ]; then
|
||||
continue
|
||||
elif [ $jemalloc_status = 0 ]; then
|
||||
echo "jemalloc is not enabled in mysql config for process with id ${pid}"
|
||||
echo "jemalloc is not enabled in mysql config for process with id ${pid}"
|
||||
else
|
||||
echo "jemalloc enabled in mysql config for process with id ${pid}"
|
||||
GENERAL_JEMALLOC_STATUS=1
|
||||
|
@@ -19,7 +19,7 @@
|
||||
# ###########################################################################
|
||||
|
||||
# Package: report_system_info
|
||||
#
|
||||
#
|
||||
|
||||
set -u
|
||||
|
||||
@@ -28,7 +28,7 @@ set -u
|
||||
# These are called from within report_system_summary() and are separated so
|
||||
# they can be tested easily.
|
||||
# ##############################################################################
|
||||
|
||||
|
||||
# ##############################################################################
|
||||
# Parse Linux's /proc/cpuinfo.
|
||||
# ##############################################################################
|
||||
@@ -254,13 +254,13 @@ parse_numactl () { local PTFUNCNAME=parse_numactl;
|
||||
-e '/node[[:digit:]]/p' \
|
||||
"${file}" \
|
||||
| sort -r \
|
||||
| awk '$1 == cnode {
|
||||
| awk '$1 == cnode {
|
||||
if (NF > 4) { for(i=3;i<=NF;i++){printf("%s ", $i)} printf "\n" }
|
||||
else { printf("%-12s", $3" "$4); }
|
||||
}
|
||||
$1 != cnode { cnode = $1; printf(" %-8s", $1); printf("%-12s", $3" "$4); }'
|
||||
$1 != cnode { cnode = $1; printf(" %-8s", $1); printf("%-12s", $3" "$4); }'
|
||||
|
||||
echo
|
||||
echo
|
||||
}
|
||||
|
||||
# ##############################################################################
|
||||
@@ -901,7 +901,7 @@ section_Memory () {
|
||||
name_val "Numa Nodes" "$(get_var "numa-available" "$data_dir/summary")"
|
||||
name_val "Numa Policy" "$(get_var "numa-policy" "$data_dir/summary")"
|
||||
name_val "Preferred Node" "$(get_var "numa-preferred-node" "$data_dir/summary")"
|
||||
|
||||
|
||||
parse_numactl "$data_dir/numactl"
|
||||
fi
|
||||
|
||||
@@ -928,7 +928,7 @@ report_fio_minus_a () {
|
||||
local file="$1"
|
||||
|
||||
name_val "fio Driver" "$(get_var driver_version "$file")"
|
||||
|
||||
|
||||
local adapters="$( get_var "adapters" "$file" )"
|
||||
for adapter in $( echo $adapters | awk '{for (i=1; i<=NF; i++) print $i;}' ); do
|
||||
local adapter_for_output="$(echo "$adapter" | sed 's/::[0-9]*$//' | tr ':' ' ')"
|
||||
@@ -1009,7 +1009,7 @@ report_system_summary () { local PTFUNCNAME=report_system_summary;
|
||||
section "Fusion-io Card"
|
||||
report_fio_minus_a "$data_dir/fusion-io_card"
|
||||
fi
|
||||
|
||||
|
||||
if [ -s "$data_dir/mounted_fs" ]; then
|
||||
section "Mounted Filesystems"
|
||||
parse_filesystems "$data_dir/mounted_fs" "${platform}"
|
||||
|