Mirror of https://github.com/percona/percona-toolkit.git, synced 2025-09-02 02:34:19 +00:00

Compare commits: v3.7.0 ... PT-2465-Fi (155 commits)
Commits (SHA1):

06d5966106 d5d39a83fa 6fc8f66792 71c8d866a5 6cff1dfe77
9b83a7dc74 64157c1fad 0c7e5d749c b3bf684f39 6043df2ce4
9726e2bfc6 3d9d7acccb 37779c4364 6e28207e89 528007e2b0
d5f091cb9c 8b4065c4d3 42f74af5ba 14eff5f1df 46f5a2d220
edef468f8e fa709c9064 11e4588a73 a63e3212f0 2b2289f9f5
dc4aee8dfd b1279680fd 21b7edea4f f31ea35421 554d8294e1
7ad2e73e5d a0a1e1decf 55f2167ed0 6e7a867fed fd2f0f94b1
2201e7f97d 2383a399a6 a66fe3ebf6 d81adddfa4 84095fd7d7
47b8c5b067 7717cfe4f1 b98ec7baa7 f9d9a993e9 7f322f7cbd
3ff98c20bc f816053065 34a14ec77e d2db8f5789 e940d154c0
ac53883f29 808c590e7a c09b622c3e ef2dbd887f 2df1bd8950
ebacadf098 c49c58db2b 9711db87a7 905490bac5 e964e17f21
c3a201d5f8 2474b1f45b 88367c1dea 840ba6926b 25f4ee6d80
c83d2f547d e4cecc3e69 f9ea94f195 c92d95bc38 6b449ec081
af7bd8abd6 6fad1f0ff0 f4a324581a 3cb46e61f7 2198763042
4bf48d864f 16f5aac023 69cbfca27f 14623c5dce 71ffb19e9e
5c16d37020 f70f8084dd 5474f5d5ff 61915d615c 201a0d9b18
d0f8fb231b 8b61618d35 7887b8f760 1d5788c3e4 a615a82f1f
6587df60b7 1511d4cef0 9ad6dc0125 e72235e696 0e1edaed97
6af18b94b0 1641438412 28abea52e7 4f678621c6 b7cf75c37e
30b8f4227f 330ca87457 6ca0b1d6fc 33af9cc021 cf11056f98
71a164c272 beebe501ec 260ca8151d 9225369a73 2b78478272
ee5ad88e2f 74a14966c6 38dfed8c1c d78e4a1396 80bc02916d
f312679c13 70ac4351f3 75cddb43db 5972093d87 6c2dec8502
9e9f7434d1 888af5f5ef dc77289d60 d82723f272 ad812fd14c
680431cb06 ffb4057d77 582df3cd02 6c29e975e2 b603ef30c2
04f8f55489 95285428bb ac3ed1f5a3 035213b876 a06b3877c8
e716a03060 5a4bd2f1da 6f62b4dc63 0a98a81370 f837672d1d
8be499a85f 6dd7d1125a 0342aace44 97c732afa5 1e581be06a
3e69100477 c796b2c148 1bec1fdde8 ce20bc7dd1 16b06dcfc2
10ed817eda e45ae53d67 d5ec5d9ca8 bcbb4e59ab 5f14441d19
.github/workflows/toolkit.yml (vendored): 4 changes

@@ -27,7 +27,7 @@ jobs:
     - name: Build the Docker image
       run: echo "FROM oraclelinux:9-slim" > Dockerfile; echo "RUN microdnf -y update" >> Dockerfile; echo "COPY bin/* /usr/bin/" >> Dockerfile; docker build . --file Dockerfile --tag percona-toolkit:${{ github.sha }}
     - name: Run Trivy vulnerability scanner
-      uses: aquasecurity/trivy-action@0.29.0
+      uses: aquasecurity/trivy-action@0.31.0
       with:
         image-ref: 'percona-toolkit:${{ github.sha }}'
         format: 'table'
@@ -36,7 +36,7 @@ jobs:
         vuln-type: 'os,library'
         severity: 'CRITICAL,HIGH'
     - name: Upload a Build Artifact
-      uses: actions/upload-artifact@v4.4.3
+      uses: actions/upload-artifact@v4.6.2
      with:
        name: binaries
        path: bin/*
@@ -39,6 +39,9 @@ extend-ignore-re = [
 "END_ND_TOOLTIPS" = "END_ND_TOOLTIPS"
 "EXPLAINed" = "EXPLAINed"
 "FH_ND_FILE" = "FH_ND_FILE"
 "GTI" = "GTI"
 "GTID" = "GTID"
 "GTIDs" = "GTIDs"
 "INSERTs" = "INSERTs"
 "IST" = "IST"
 "istError" = "istError"
Changelog: 19 changes

@@ -1,5 +1,21 @@
 Changelog for Percona Toolkit
 
+v3.7.0-2 released 2025-05-14
+
+This release addresses multiple security vulnerabilities reported in Percona Toolkit version 3.7.0, including issues related to the `libxml2` component (CVE-2024-56171, CVE-2025-24928), `openssl` (CVE-2024-12797), and `krb5` (CVE-2022-37967).
+
+* Fixed bug PT-2442: percona-toolkit:latest Vulnerability [CVE-2024-56171 CVE-2024-12797 CVE-2022-37967 CVE-2025-24928]
+* Fixed bug PT-2375: pt-table-sync does not work with generated columns (Thanks to Henning Pöttker for the contribution)
+* Fixed bug PT-2400: pt-table-checksum reports the error for warning code 4164
+* Fixed bug PT-2377: pt-table-sync does not handle utf8 strings in JSON columns correctly (Thanks to Henning Pöttker for the contribution)
+* Fixed bug PT-2378: pt-table-sync does print floating point numbers in SQL statements with insufficient precision (Thanks to Henning Pöttker for the contribution)
+* Fixed bug PT-2389: pt-online-schema-change resume functionality doesn't work with ADD/DROP column (Thanks to Perry Harrington for the contribution)
+* Fixed bug PT-2410: pt-archiver brokes when using output-format=csv and has null values (Thanks to Roberto de Bem for the contribution)
+* Fixed bug PT-2422: pt-online-schema-change race overwrites new_table_name (Thanks to Perry Harrington for the contribution)
+* Fixed bug PT-2407: pt-online-schema-change exit status(return code) is 0 even if it does NOT succeed
+* Fixed bug PT-2355: pt-online-schema-change should not resume a job with empty boundaries
+* Fixed bug PT-1577: pt-secure-collect, replace pt-secure-data in Usage section
+
 v3.7.0 released 2024-12-23
 
 * Feature PT-2340: Support MySQL 8.4
@@ -18,8 +34,7 @@ v3.6.0 released 2024-06-12
 * Improvement PR-160: added support for operf if present, and if CMD_OPCONTROL is not set (Thanks to Fernando Ipar for the contribution)
 * Improvement PR-140: Add CPU cache configuration info to pt-summary (Thanks to Alexey Kopytov for the contribution)
 * Improvement PR-765: pt-galera-log-explainer: improvements from feedbacks (Thanks Yoann La Cancellera for the contribution)
-* Improvement PT-2233: pt-k8s-debug-collector needs psql in the host node
-  (Thanks to Jobin Augustine for the contribution)
+* Improvement PT-2233: pt-k8s-debug-collector needs psql in the host node (Thanks to Jobin Augustine for the contribution)
 * Improvement PR-772: pt-galera-log-explainer: add custom regexes parameter (Thanks Yoann La Cancellera for the contribution)
 * Improvement PT-2301: pt-galera-log-explainer: whois redesign (Thanks Yoann La Cancellera for the contribution)
 * Improvement PT-2190: pt-show-grants should use print_identified_with_as_hex
@@ -11,7 +11,7 @@ MAKE_GOTOOLS
 
 WriteMakefile(
     NAME      => 'Percona::Toolkit',
-    VERSION   => '3.7.0',
+    VERSION   => '3.7.0-2',
     EXE_FILES => [
         map {
             (my $name = $_) =~ s/^bin.//;
@@ -2993,7 +2993,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
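The change above replaces a bare `return $val` with a round-trip test: Perl's default stringification keeps roughly 15 significant digits, so a float whose string form no longer equals the number is re-printed with `sprintf("%.17g", ...)`, enough digits to round-trip an IEEE 754 double. A standalone sketch of the test (not the tool's code):

```perl
#!/usr/bin/env perl
use strict;
use warnings;

my $val = 0.1 + 0.2;                 # binary float, not exactly 0.3

my $lossy = "$val";                  # default stringification: "0.3"
my $exact = sprintf("%.17g", $val);  # 17 significant digits round-trip

print "default : $lossy\n";          # 0.3
print "%.17g   : $exact\n";          # 0.30000000000000004
# the same check the patch uses to decide whether precision was lost:
print "default round-trips: ", ($val - "$val" == 0 ? "yes" : "no"), "\n";
```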
@@ -5758,7 +5761,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
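The guard change matters because `$instances_to_check` can be left undefined when the eval above it fails, and dereferencing undef with `@$instances_to_check` is fatal under `use strict`. A minimal sketch, assuming the same variable shape:

```perl
use strict;
use warnings;

my $instances_to_check;   # undef, as after a failed eval

# if ( @$instances_to_check ) { ... }
#   dies: "Can't use an undefined value as an ARRAY reference"

if ( $instances_to_check and @$instances_to_check ) {   # short-circuits on undef
    print "checking instances\n";
}
print "safe\n";
```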
@@ -6966,7 +6969,7 @@ sub main {
       warn "Invalid output format:". $o->get('format');
       warn "Using default 'dump' format";
    } elsif ($o->get('output-format') || '' eq 'csv') {
-      $fields_separated_by = ", ";
+      $fields_separated_by = ",";
       $optionally_enclosed_by = '"';
    }
    my $need_hdr = $o->get('header') && !-f $archive_file;
@@ -7508,7 +7511,7 @@ sub escape {
    s/([\t\n\\])/\\$1/g if defined $_;  # Escape tabs etc
    my $s = defined $_ ? $_ : '\N';     # NULL = \N
    # var & ~var will return 0 only for numbers
-   if ($s !~ /^[0-9,.E]+$/ && $optionally_enclosed_by eq '"') {
+   if ($s !~ /^[0-9,.E]+$/ && $optionally_enclosed_by eq '"' && $s ne '\N') {
       $s =~ s/([^\\])"/$1\\"/g;
       $s = $optionally_enclosed_by."$s".$optionally_enclosed_by;
    }
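The extra `$s ne '\N'` condition keeps the NULL marker out of the quoting branch: `\N` is the conventional placeholder for SQL NULL in CSV/tab dumps, and wrapping it in quotes would make loaders read it as the literal string "\N". A standalone sketch of the logic:

```perl
use strict;
use warnings;

my $optionally_enclosed_by = '"';
for my $field ( 'abc', undef ) {
    my $s = defined $field ? $field : '\N';   # NULL = \N
    if ( $s !~ /^[0-9,.E]+$/ && $optionally_enclosed_by eq '"' && $s ne '\N' ) {
        $s = qq{"$s"};                        # quote ordinary strings only
    }
    print "$s\n";                             # "abc" then an unquoted \N
}
```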
@@ -3063,11 +3063,24 @@ sub _parse_config {
    }
    elsif ( my $dbh = $args{dbh} ) {
       $config_data{format} = $args{format} || 'show_variables';
+      my $mysql_version = _get_version($dbh);
       my $sql = "SHOW /*!40103 GLOBAL*/ VARIABLES";
       PTDEBUG && _d($dbh, $sql);
       my $rows = $dbh->selectall_arrayref($sql);
-      $config_data{vars} = { map { @$_ } @$rows };
-      $config_data{mysql_version} = _get_version($dbh);
+      $config_data{vars} = {
+         map {
+            my ($variable, $value) = @$_;
+            if ( length($value) == 1024 && $mysql_version ge '5.7.0' ) {
+               my $var_sql = "SELECT \@\@global.$variable";
+               PTDEBUG && _d($dbh, $var_sql);
+               my $var_sth = $dbh->prepare($var_sql);
+               $var_sth->execute();
+               ($value) = $var_sth->fetchrow_array();
+            }
+            $variable => $value
+         } @$rows
+      };
+      $config_data{mysql_version} = $mysql_version;
    }
    else {
       die "Unknown config source";
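The rewritten map works around a server-side limit: from MySQL 5.7, `SHOW VARIABLES` truncates values to 1024 characters, so any value of exactly that length is re-read via `SELECT @@global.<variable>`, which returns it in full. A minimal DBI sketch with hypothetical connection parameters:

```perl
use strict;
use warnings;
use DBI;

# hypothetical credentials for illustration only
my $dbh = DBI->connect("DBI:mysql:host=127.0.0.1", "user", "pass",
                       { RaiseError => 1 });

my %vars;
for my $row ( @{ $dbh->selectall_arrayref("SHOW GLOBAL VARIABLES") } ) {
    my ($variable, $value) = @$row;
    if ( defined $value && length($value) == 1024 ) {   # possibly truncated
        ($value) = $dbh->selectrow_array("SELECT \@\@global.$variable");
    }
    $vars{$variable} = $value;
}
```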
@@ -4934,7 +4947,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -2088,7 +2088,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -3981,7 +3984,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -4431,7 +4431,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -133,7 +133,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -4545,7 +4548,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -1690,7 +1690,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -3307,7 +3310,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -1242,7 +1242,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -3488,7 +3491,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -3564,7 +3564,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -5349,7 +5352,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -589,7 +589,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -5944,7 +5947,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -4905,7 +4905,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -6572,7 +6575,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -2866,7 +2866,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -5862,8 +5865,6 @@ sub _nibble_params {
    );
    PTDEBUG && _d('Ascend params:', Dumper($asc));
 
-   my $force_concat_enums;
-
    my $from     = "$tbl->{name} FORCE INDEX(`$index`)";
    my $order_by = join(', ', map {$q->quote($_)} @{$index_cols});
@@ -7857,7 +7858,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -8942,12 +8943,20 @@ sub main {
    $sth->finish();
    PTDEBUG && _d('Last chunk:', Dumper($last_chunk));
 
-   if ( !$last_chunk || !$last_chunk->{new_table_name} ) {
+   if ( !$last_chunk ) {
       $oktorun = undef;
       _die("Option --resume refers non-existing job ID: ${old_job_id}. Exiting."
          , UNSUPPORTED_OPERATION);
    }
 
+   if ( !$last_chunk->{new_table_name}
+      || !$last_chunk->{lower_boundary}
+      || !$last_chunk->{upper_boundary} ) {
+      $oktorun = undef;
+      _die("Option --resume refers job ${old_job_id} with empty boundaries. Exiting."
+         , UNSUPPORTED_OPERATION);
+   }
+
    if ( $last_chunk->{db} ne $db
       || $last_chunk->{tbl} ne $tbl
       || $last_chunk->{altr} ne $o->get('alter') ){
@@ -9396,12 +9405,12 @@ sub main {
    };
    if ( $EVAL_ERROR ) {
       chomp $EVAL_ERROR;
-      _die("Error checking --max-load or --critial-load: $EVAL_ERROR. "
+      _die("Error checking --max-load or --critical-load: $EVAL_ERROR. "
         . "Check that the variables specified for --max-load and "
         . "--critical-load are spelled correctly and exist in "
         . "SHOW GLOBAL STATUS. Current values for these options are:\n"
         . "  --max-load " . (join(',', @{$o->get('max-load')})) . "\n"
-        . "  --critial-load " . (join(',', @{$o->get('critical-load')}))
+        . "  --critical-load " . (join(',', @{$o->get('critical-load')}))
         , INVALID_PARAMETERS);
    }
@@ -9606,11 +9615,16 @@ sub main {
    # ''
    # doesn't match '(?-xism:Failed to find a unique new table name)'
 
    # (*) Frank: commented them out because it caused infinite loop
    # and the mentioned test error doesn't arise
 
    my $original_error = $EVAL_ERROR;
-   my $original_error_code = $?;
+   my $original_error_code;
+   if ( $? ) {
+      $original_error_code = $?;
+   }
+   else {
+      $original_error_code = $!;
+   }
 
    $SIG{__DIE__} = 'DEFAULT';
 
    foreach my $task ( reverse @cleanup_tasks ) {
      eval {
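The replacement stops assuming `$?` holds a useful code: `$?` carries the status of the last child process (or a code set by a die handler), while `$!` is the errno of the last failing system call, so falling back to `$!` preserves a meaningful non-zero exit status. A standalone illustration:

```perl
use strict;
use warnings;

open(my $fh, '<', '/nonexistent/file') or do {
    # prefer the child/handler status if set, otherwise the errno
    my $code = $? ? $? : ($! + 0);   # $! in numeric context is the errno
    warn "would exit with code $code ($!)\n";
};
```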
@@ -9796,9 +9810,9 @@ sub main {
 
    if ( $o->get('history') ) {
       my $sth = $cxn->dbh()->prepare(
-         "UPDATE ${hist_table} SET new_table_name = ?"
+         "UPDATE ${hist_table} SET new_table_name = ? WHERE job_id = ?"
       );
-      $sth->execute($new_tbl->{tbl});
+      $sth->execute($new_tbl->{tbl}, $job_id);
    }
 
    # If the new table still exists, drop it unless the tool was interrupted.
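The `WHERE job_id = ?` added here scopes the write to the current run's history row; without it the statement rewrites `new_table_name` for every row, which is the race PT-2422 describes when concurrent pt-online-schema-change jobs share the history table. A sketch with assumed names (`$dbh`, `$hist_table`, `$job_id`, `$new_table_name`):

```perl
# placeholders keep the statement safe and the job_id filter keeps it
# from clobbering other jobs' rows
my $sth = $dbh->prepare(
    "UPDATE $hist_table SET new_table_name = ? WHERE job_id = ?"
);
$sth->execute($new_table_name, $job_id);   # one row, not the whole table
```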
@@ -9911,7 +9925,7 @@ sub main {
       );
    }
 
-   if ( my $alter = $o->get('alter') ) {
+   if ( (my $alter = $o->get('alter')) && !$o->get('resume') ) {
       print "Altering new table...\n";
       my $sql = "ALTER TABLE $new_tbl->{name} $alter";
       print $sql, "\n" if $o->get('print');
@@ -9920,10 +9934,12 @@ sub main {
          $cxn->dbh()->do($sql);
       };
       if ( $EVAL_ERROR ) {
-         if ( $plugin && $plugin->can('before_die') ) {
-            $plugin->before_die(exit_status => $EVAL_ERROR);
-         }
+         if ( $plugin && $plugin->can('before_die') ) {
+            $plugin->before_die(exit_status => $EVAL_ERROR);
+         }
+         # this is trapped by a signal handler. Don't replace it with _die
+         # we need to override $SIG{__DIE__} to return correct error code
+         $SIG{__DIE__} = sub { print(STDERR "$_[0]"); exit ERROR_ALTERING_TABLE; };
          die "Error altering new table $new_tbl->{name}: $EVAL_ERROR\n";
       }
       print "Altered $new_tbl->{name} OK.\n";
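The handler installed above exists because a plain `die` makes Perl exit with status 255 regardless of the error; overriding `$SIG{__DIE__}` lets the tool report a specific code instead (the PT-2407 fix). A standalone sketch; the constant's value here is an assumption for illustration:

```perl
use strict;
use warnings;
use constant ERROR_ALTERING_TABLE => 13;   # assumed value, for illustration

$SIG{__DIE__} = sub { print STDERR $_[0]; exit ERROR_ALTERING_TABLE; };
die "Error altering new table\n";          # the shell now sees exit status 13
```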
@@ -1257,7 +1257,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -12789,7 +12792,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -16255,12 +16258,12 @@ type: string; default: report
 How to format and print the query analysis results. Accepted values are:
 
    VALUE          FORMAT
-   =======        ===============================
+   =======        ======================================
    report         Standard query analysis report
    slowlog        MySQL slow log
    json           JSON, one array per query class
    json-anon      JSON without example queries
-   secure-slowlog JSON without example queries
+   secure-slowlog MySQL slow log with anonymized queries
 
 The entire C<report> output can be disabled by specifying C<--no-report>
 (see L<"--[no]report">), and its sections can be disabled or rearranged
@@ -135,7 +135,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -4653,7 +4656,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -3790,7 +3790,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -899,7 +899,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -4134,7 +4134,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -6683,8 +6686,6 @@ sub _nibble_params {
    );
    PTDEBUG && _d('Ascend params:', Dumper($asc));
 
-   my $force_concat_enums;
-
    my $from     = "$tbl->{name} FORCE INDEX(`$index`)";
    my $order_by = join(', ', map {$q->quote($_)} @{$index_cols});
@@ -9081,11 +9082,24 @@ sub _parse_config {
    }
    elsif ( my $dbh = $args{dbh} ) {
       $config_data{format} = $args{format} || 'show_variables';
+      my $mysql_version = _get_version($dbh);
       my $sql = "SHOW /*!40103 GLOBAL*/ VARIABLES";
       PTDEBUG && _d($dbh, $sql);
       my $rows = $dbh->selectall_arrayref($sql);
-      $config_data{vars} = { map { @$_ } @$rows };
-      $config_data{mysql_version} = _get_version($dbh);
+      $config_data{vars} = {
+         map {
+            my ($variable, $value) = @$_;
+            if ( length($value) == 1024 && $mysql_version ge '5.7.0' ) {
+               my $var_sql = "SELECT \@\@global.$variable";
+               PTDEBUG && _d($dbh, $var_sql);
+               my $var_sth = $dbh->prepare($var_sql);
+               $var_sth->execute();
+               ($value) = $var_sth->fetchrow_array();
+            }
+            $variable => $value
+         } @$rows
+      };
+      $config_data{mysql_version} = $mysql_version;
    }
    else {
       die "Unknown config source";
@@ -10271,11 +10285,11 @@ sub main {
 
    if ( $o->get('truncate-replicate-table') && $o->get('resume') ) {
       die "--resume and truncate-replicate-table are mutually exclusive";
    }
 
    if ( $o->get('truncate-replicate-table') && !$o->get('empty-replicate-table') ) {
-      die "--resume and --no-empty-replicate-table are mutually exclusive";
+      die "--truncate-replicate-table and --no-empty-replicate-table are mutually exclusive";
    }
 
    # ########################################################################
@@ -11074,7 +11088,7 @@ sub main {
          . "(db, tbl, chunk, chunk_index,"
          . " lower_boundary, upper_boundary, this_cnt, this_crc) "
          . "SELECT"
-         . ($cluster->is_cluster_node($source_cxn) ? ' /*!99997*/' : '')
+         . ($cluster->is_cluster_node($source_cxn) ? ' /*!99997 */' : '')
          . " ?, ?, ?, ?, ?, ?,";
       my $past_cols = " COUNT(*), '0'";
@@ -1909,7 +1909,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -3612,6 +3615,14 @@ sub make_UPDATE {
       @cols = $self->sort_cols($row);
    }
    my $types = $self->{tbl_struct}->{type_for};
+
+   foreach my $col ( @cols ) {
+      my $is_json = ($types->{$col} || '') =~ m/json/i;
+      if ( $is_json && defined $row->{$col} ) {
+         utf8::decode($row->{$col});
+      }
+   }
+
    return "UPDATE $self->{dst_db_tbl} SET "
       . join(', ', map {
            my $is_hex = ($types->{$_} || '') =~ m/^0x[0-9a-fA-F]+$/i;
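The loop added here decodes JSON column values before they are interpolated into SQL: DBD::mysql hands them back as raw UTF-8 bytes, and `utf8::decode()` upgrades them to a Perl character string so multi-byte text survives later string operations (the PT-2377 fix). A standalone sketch:

```perl
use strict;
use warnings;
binmode(STDOUT, ':encoding(UTF-8)');

my $json = qq({"name": "P\xc3\xb6ttker"});   # raw bytes, as from the driver
utf8::decode($json);                          # now a character string
print length($json), "\n";                    # 19 characters, not 20 bytes
print "$json\n";                              # {"name": "Pöttker"}
```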
@@ -3660,6 +3671,13 @@ sub make_row {
    my $q        = $self->{Quoter};
    my $type_for = $self->{tbl_struct}->{type_for};
 
+   foreach my $col ( @cols ) {
+      my $is_json = ($type_for->{$col} || '') =~ m/json/i;
+      if ( $is_json && defined $row->{$col} ) {
+         utf8::decode($row->{$col});
+      }
+   }
+
    return "$verb INTO $self->{dst_db_tbl}("
       . join(', ', map { $q->quote($_) } @cols)
       . ') VALUES ('
@@ -3708,6 +3726,7 @@ sub sort_cols {
    my @cols;
    if ( $self->{tbl_struct} ) {
       my $pos          = $self->{tbl_struct}->{col_posn};
+      my $is_generated = $self->{tbl_struct}->{is_generated};
       my @not_in_tbl;
       @cols = sort {
            $pos->{$a} <=> $pos->{$b}
@@ -3721,6 +3740,9 @@ sub sort_cols {
               1;
            }
         }
+        grep {
+           !$is_generated->{$_}
+        }
         sort keys %$row;
      push @cols, @not_in_tbl if @not_in_tbl;
    }
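The `grep` added here drops generated columns from the column list, since MySQL rejects writes to them; that is what broke pt-table-sync in PT-2375. A standalone sketch with assumed schema metadata:

```perl
use strict;
use warnings;

# %is_generated stands in for $self->{tbl_struct}->{is_generated}
my %is_generated = ( full_name => 1 );                       # assumed metadata
my %row          = ( id => 1, first => 'a', full_name => 'a b' );

my @cols = grep { !$is_generated{$_} } sort keys %row;
print "@cols\n";   # "first id" -- full_name is skipped, so no write hits it
```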
@@ -9582,7 +9604,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -6680,7 +6680,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -1254,7 +1254,10 @@ sub quote_val {
    return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
       && !$args{is_char};                      # unless is_char is true
 
-   return $val if $args{is_float};
+   if ( $args{is_float} ) {
+      return sprintf("%.17g", $val) if $val - "$val" != 0;
+      return $val;
+   }
 
    $val =~ s/(['\\])/\\$1/g;
    return "'$val'";
@@ -4294,7 +4297,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -4729,7 +4729,7 @@ sub version_check {
       PTDEBUG && _d('Version check failed:', $EVAL_ERROR);
    }
 
-   if ( @$instances_to_check ) {
+   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         update_check_times(
            instances => $instances_to_check,
@@ -11,7 +11,6 @@ Vendor: Percona
 URL:       http://www.percona.com/software/percona-toolkit/
 Source:    percona-toolkit-%{version}.tar.gz
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
-BuildArch: @@ARCHITECTURE@@
 
 BuildRequires: perl(ExtUtils::MakeMaker) make
 Requires: perl(DBI) >= 1.13, perl(DBD::mysql) >= 1.0, perl(Time::HiRes), perl(IO::Socket::SSL), perl(Digest::MD5), perl(Term::ReadKey)
@@ -333,11 +333,6 @@ build_srpm(){
     cd ${WORKDIR}/rpmbuild/SPECS
     echo '%undefine _missing_build_ids_terminate_build' | cat - percona-toolkit.spec > pt.spec && mv pt.spec percona-toolkit.spec
     echo '%define debug_package %{nil}' | cat - percona-toolkit.spec > pt.spec && mv pt.spec percona-toolkit.spec
-    if [ x"$ARCH" = "xaarch64" ]; then
-        sed -i "s/@@ARCHITECTURE@@/aarch64/" percona-toolkit.spec
-    else
-        sed -i "s/@@ARCHITECTURE@@/x86_64/" percona-toolkit.spec
-    fi
 
     cd ${WORKDIR}/${PRODUCT_FULL}
     rm -rf bin/govendor
@@ -398,7 +393,7 @@ build_rpm(){
     ARCH=$(echo $(uname -m) | sed -e 's:i686:i386:g')
     echo "RHEL=${RHEL}" >> percona-toolkit.properties
     echo "ARCH=${ARCH}" >> percona-toolkit.properties
-    rpmbuild --target=${ARCH} --define "version $VERSION" --define "VERSION $VERSION" --define "dist .el${RHEL}" --define "release $RPM_RELEASE.el${RHEL}" --define "_topdir ${WORKDIR}/rpmbuild" --rebuild rpmbuild/SRPMS/${SRC_RPM}
+    rpmbuild --define "version $VERSION" --define "VERSION $VERSION" --define "dist .el${RHEL}" --define "release $RPM_RELEASE.el${RHEL}" --define "_topdir ${WORKDIR}/rpmbuild" --rebuild rpmbuild/SRPMS/${SRC_RPM}
 
     return_code=$?
     if [ $return_code != 0 ]; then
@@ -587,8 +582,8 @@ OS_NAME=
 ARCH=
 OS=
 INSTALL=0
-RPM_RELEASE=1
-DEB_RELEASE=1
+RPM_RELEASE=2
+DEB_RELEASE=2
 REVISION=0
 GIT_BRANCH=${GIT_BRANCH}
 GIT_REPO=https://github.com/percona/percona-toolkit.git
@@ -22,3 +22,4 @@
 {% endif %}
 </ul>
 </nav>
@@ -1,9 +1,18 @@
 <div class="md-relbar2__inner md-grid">
   <div class="md-content">
     <article class="md-content__inner md-typeset" role="main">
-      <h4>Get Expert Help </h4>
-      <p>If you need assistance, visit the <a class="reference external" href="https://forums.percona.com/c/polyglot-projects/percona-toolkit/13?utm_campaign=Doc%20pages" target="_blank">community forum</a> for comprehensive and free database knowledge.</p>
-      <p>Contact our <a class="reference external" href="https://www.percona.com/about/contact">Percona Database Experts</a> for professional support and services.</p>
+      <svg width="78" height="69" viewBox="0 0 78 69" fill="none" xmlns="http://www.w3.org/2000/svg">
+        <path d="M56.7281 30.7666C62.6528 26.8938 64.5914 18.9942 61.0059 12.7854C59.2094 9.67113 56.3053 7.44082 52.8311 6.50951C49.6122 5.64535 46.2502 5.99872 43.2912 7.49366L39.251 0.5L30.8648 15.0245L11.4811 48.5937H67.021L56.7281 30.7666ZM51.881 10.0674C54.4064 10.7401 56.5079 12.3616 57.8168 14.6194C60.3895 19.0701 59.053 24.7153 54.8808 27.5665L45.1362 10.6905C47.2355 9.68104 49.6034 9.46087 51.881 10.0674ZM39.251 7.87125L60.6339 44.907H48.1228L32.9927 18.7102L39.2499 7.87235L39.251 7.87125ZM17.8682 44.907L30.8637 22.4035L43.8592 44.907H17.8682Z" fill="url(#paint0_linear_2899_1968)"/>
+        <path d="M4.981 64.943H3.157V68.207H.756V57.323H5.217C7.822 57.323 9.397 58.861 9.397 61.086V61.116C9.397 63.635 7.433 64.94 4.984 64.94L4.981 64.943V64.943ZM6.961 61.134C6.961 60.061 6.213 59.485 5.011 59.485H3.154V62.812H5.056C6.258 62.812 6.958 62.096 6.958 61.163V61.134H6.961ZM10.738 68.208V57.323H18.973V59.455H13.124V61.664H18.27V63.796H13.124V66.082H19.051V68.214H10.738V68.208 68.208ZM27.557 68.208 25.218 64.726H23.332V68.208H20.931V57.323H25.921C28.496 57.323 30.039 58.677 30.039 60.915V60.945C30.039 62.702 29.088 63.807 27.7 64.32L30.367 68.207H27.556L27.557 68.208ZM27.605 61.041C27.605 60.016 26.887 59.485 25.719 59.485H23.333V62.61H25.767C26.936 62.61 27.605 61.987 27.605 61.071V61.042 61.041ZM36.922 68.499C33.668 68.499 31.249 65.994 31.249 62.825V62.795C31.249 59.659 33.619 57.091 37.019 57.091 39.105 57.091 40.356 57.783 41.383 58.792L39.834 60.571C38.98 59.798 38.113 59.327 37.004 59.327 35.141 59.327 33.795 60.871 33.795 62.762V62.793C33.795 64.684 35.107 66.257 37.004 66.257 38.268 66.257 39.043 65.753 39.913 64.964L41.462 66.524C40.322 67.738 39.059 68.493 36.925 68.493L36.922 68.499ZM47.885 68.499C44.47 68.499 42.021 65.962 42.021 62.825V62.795C42.021 59.659 44.503 57.091 47.915 57.091 51.327 57.091 53.779 59.628 53.779 62.765V62.795C53.779 65.931 51.297 68.499 47.885 68.499ZM51.237 62.795C51.237 60.904 49.846 59.331 47.885 59.331 45.925 59.331 44.567 60.874 44.567 62.766V62.796C44.567 64.688 45.959 66.261 47.919 66.261 49.879 66.261 51.237 64.717 51.237 62.826V62.795 62.795ZM67.001 68.217 72.374 57.091 77.746 68.218H75.052L72.374 62.681 69.705 68.218H67.001V68.217ZM66.007 57.327V68.5L57.813 61.884V68.199H55.264V57.091L63.458 63.681V57.327H66.007Z" fill="#2C323E"/>
+        <defs>
+          <linearGradient id="paint0_linear_2899_1968" x1="18.1513" y1="44.7152" x2="61.4356" y2="20.9786" gradientUnits="userSpaceOnUse">
+            <stop stop-color="#FC3519"/>
+            <stop offset="1" stop-color="#F0D136"/>
+          </linearGradient>
+        </defs>
+      </svg>
+      <p>For help, click the link below to get free database assistance or contact our experts for personalized support.</p>
+      <p> <a class="reference external" href="https://docs.percona.com/percona-toolkit/get-help.html" target="_blank">Get help from Percona</a> </p>
       <hr>
       {# Add the last updated timestamp from git commits using gitstamp extension #}
       {%- if gitstamp %} <small> Last update: {{gitstamp}} </small>
@@ -31,3 +31,4 @@ body,input{
 font-feature-settings:"kern","liga";
 font-family:"Chivo", "Colfax", "Franziska", Helvetica, Arial, sans-serif;
 }
config/sphinx-build/_static/percona-logo-1.svg (new file): 9 lines

@@ -0,0 +1,9 @@
+<svg width="520" height="451" viewBox="0 0 520 451" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M423.631 283.376C479.103 247.116 497.253 173.155 463.684 115.024C446.863 85.8662 419.673 64.9845 387.145 56.265C357.008 48.1741 325.531 51.4826 297.826 65.4793L260 0L181.483 135.988L0 450.285H520L423.631 283.376ZM378.25 89.5766C401.894 95.8741 421.57 111.056 433.825 132.195C457.912 173.866 445.399 226.719 406.336 253.414L315.1 95.4103C334.755 85.9589 356.925 83.8976 378.25 89.5766ZM260 69.0145L460.2 415.767H343.063L201.406 170.496L259.99 69.0248L260 69.0145ZM59.8002 415.767L181.472 205.075L303.144 415.767H59.8002Z" fill="url(#paint0_linear_552_15)"/>
+<defs>
+<linearGradient id="paint0_linear_552_15" x1="58.6494" y1="460.228" x2="444.575" y2="74.3019" gradientUnits="userSpaceOnUse">
+<stop stop-color="#FC3519"/>
+<stop offset="1" stop-color="#F0D136"/>
+</linearGradient>
+</defs>
+</svg>
@@ -1,9 +1,10 @@
-<svg width="520" height="451" viewBox="0 0 520 451" fill="none" xmlns="http://www.w3.org/2000/svg">
-<path d="M423.631 283.376C479.103 247.116 497.253 173.155 463.684 115.024C446.863 85.8662 419.673 64.9845 387.145 56.265C357.008 48.1741 325.531 51.4826 297.826 65.4793L260 0L181.483 135.988L0 450.285H520L423.631 283.376ZM378.25 89.5766C401.894 95.8741 421.57 111.056 433.825 132.195C457.912 173.866 445.399 226.719 406.336 253.414L315.1 95.4103C334.755 85.9589 356.925 83.8976 378.25 89.5766ZM260 69.0145L460.2 415.767H343.063L201.406 170.496L259.99 69.0248L260 69.0145ZM59.8002 415.767L181.472 205.075L303.144 415.767H59.8002Z" fill="url(#paint0_linear_552_15)"/>
-<defs>
-<linearGradient id="paint0_linear_552_15" x1="58.6494" y1="460.228" x2="444.575" y2="74.3019" gradientUnits="userSpaceOnUse">
-<stop stop-color="#FC3519"/>
-<stop offset="1" stop-color="#F0D136"/>
-</linearGradient>
-</defs>
-</svg>
+<svg width="78" height="69" viewBox="0 0 78 69" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M56.7281 30.7666C62.6528 26.8938 64.5914 18.9942 61.0059 12.7854C59.2094 9.67113 56.3053 7.44082 52.8311 6.50951C49.6122 5.64535 46.2502 5.99872 43.2912 7.49366L39.251 0.5L30.8648 15.0245L11.4811 48.5937H67.021L56.7281 30.7666ZM51.881 10.0674C54.4064 10.7401 56.5079 12.3616 57.8168 14.6194C60.3895 19.0701 59.053 24.7153 54.8808 27.5665L45.1362 10.6905C47.2355 9.68104 49.6034 9.46087 51.881 10.0674ZM39.251 7.87125L60.6339 44.907H48.1228L32.9927 18.7102L39.2499 7.87235L39.251 7.87125ZM17.8682 44.907L30.8637 22.4035L43.8592 44.907H17.8682Z" fill="url(#paint0_linear_2899_1968)"/>
+<path d="M4.981 64.943H3.157V68.207H.756V57.323H5.217C7.822 57.323 9.397 58.861 9.397 61.086V61.116C9.397 63.635 7.433 64.94 4.984 64.94L4.981 64.943V64.943ZM6.961 61.134C6.961 60.061 6.213 59.485 5.011 59.485H3.154V62.812H5.056C6.258 62.812 6.958 62.096 6.958 61.163V61.134H6.961ZM10.738 68.208V57.323H18.973V59.455H13.124V61.664H18.27V63.796H13.124V66.082H19.051V68.214H10.738V68.208 68.208ZM27.557 68.208 25.218 64.726H23.332V68.208H20.931V57.323H25.921C28.496 57.323 30.039 58.677 30.039 60.915V60.945C30.039 62.702 29.088 63.807 27.7 64.32L30.367 68.207H27.556L27.557 68.208ZM27.605 61.041C27.605 60.016 26.887 59.485 25.719 59.485H23.333V62.61H25.767C26.936 62.61 27.605 61.987 27.605 61.071V61.042 61.041ZM36.922 68.499C33.668 68.499 31.249 65.994 31.249 62.825V62.795C31.249 59.659 33.619 57.091 37.019 57.091 39.105 57.091 40.356 57.783 41.383 58.792L39.834 60.571C38.98 59.798 38.113 59.327 37.004 59.327 35.141 59.327 33.795 60.871 33.795 62.762V62.793C33.795 64.684 35.107 66.257 37.004 66.257 38.268 66.257 39.043 65.753 39.913 64.964L41.462 66.524C40.322 67.738 39.059 68.493 36.925 68.493L36.922 68.499ZM47.885 68.499C44.47 68.499 42.021 65.962 42.021 62.825V62.795C42.021 59.659 44.503 57.091 47.915 57.091 51.327 57.091 53.779 59.628 53.779 62.765V62.795C53.779 65.931 51.297 68.499 47.885 68.499ZM51.237 62.795C51.237 60.904 49.846 59.331 47.885 59.331 45.925 59.331 44.567 60.874 44.567 62.766V62.796C44.567 64.688 45.959 66.261 47.919 66.261 49.879 66.261 51.237 64.717 51.237 62.826V62.795 62.795ZM67.001 68.217 72.374 57.091 77.746 68.218H75.052L72.374 62.681 69.705 68.218H67.001V68.217ZM66.007 57.327V68.5L57.813 61.884V68.199H55.264V57.091L63.458 63.681V57.327H66.007Z" fill="#2C323E"/>
+<defs>
+<linearGradient id="paint0_linear_2899_1968" x1="18.1513" y1="44.7152" x2="61.4356" y2="20.9786" gradientUnits="userSpaceOnUse">
+<stop stop-color="#FC3519"/>
+<stop offset="1" stop-color="#F0D136"/>
+</linearGradient>
+</defs>
+</svg>
@@ -41,7 +41,7 @@ master_doc = 'index'
 
 # General information about the project.
 project = u'Percona Toolkit'
-copyright = u'2024, Percona LLC and/or its affiliates'
+copyright = u'2025, Percona LLC and/or its affiliates'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -50,7 +50,7 @@ copyright = u'2024, Percona LLC and/or its affiliates'
 # The short X.Y version.
 version = '3.7'
 # The full version, including alpha/beta/rc tags.
-release = '3.7.0'
+release = '3.7.0-2'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -202,7 +202,7 @@ htmlhelp_basename = 'PerconaToolkitdoc'
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
     ('index', 'PerconaToolkit.tex', u'Percona Toolkit Documentation',
-     u'2024, Percona LLC and/or its affiliates', 'manual'),
+     u'2025, Percona LLC and/or its affiliates', 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
@@ -236,5 +236,5 @@ latex_toplevel_sectioning = 'part'
 # (source start file, name, description, authors, manual section).
 man_pages = [
     ('index', 'perconatoolkit', u'Percona Toolkit Documentation',
-     [u'2024, Percona LLC and/or its affiliates'], 1)
+     [u'2025, Percona LLC and/or its affiliates'], 1)
 ]
docs/get-help.rst (new file): 40 lines
@@ -0,0 +1,40 @@
.. _get-help:

Get help from Percona
**********************

This guide is packed with information but can't cover everything you need to know about Percona Toolkit or every scenario you might encounter. Don't be afraid to try things out and ask questions when you get stuck.

Ask a question in the Community Forum
=======================================

Be a part of a space where you can tap into a wealth of knowledge from other database enthusiasts and experts who work with Percona's software every day. While our service is entirely free, keep in mind that response times can vary depending on the complexity of the question. You are engaging with people who genuinely love solving database challenges.

We recommend visiting our `Community Forum <https://forums.percona.com/t/welcome-to-perconas-community-forum/7>`_.
It's an excellent place for discussions, technical insights, and support around Percona database software. If you're new and feeling a bit unsure, our `FAQ <https://forums.percona.com/faq>`_ and `Guide for New Users <https://forums.percona.com/t/faq-guide-for-new-users/8562>`_ ease you in.

If you have thoughts, feedback, or ideas, the community team would like to hear from you at `Any ideas on how to make the forum better? <https://forums.percona.com/t/any-ideas-on-how-to-make-the-forum-better/11522>`_.
We're always excited to connect and improve everyone's experience.

Work with a Percona Expert
==============================

Percona experts bring years of experience in tackling tough database performance issues and design challenges.

.. raw:: html

   <div data-tf-live="01JKGY9435F75X6DHG92DJZB26"></div>
   <script src="//embed.typeform.com/next/embed.js"></script>

We understand your challenges when managing complex database environments. That's why we offer various services to help you simplify your operations and achieve your goals.

+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Service                      | Description                                                                                                                                                           |
+==============================+=======================================================================================================================================================================+
| 24/7 Expert Support          | Our dedicated team of database experts is available 24/7 to assist you with any database issues. We provide flexible support plans tailored to your specific needs.  |
+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Hands-On Database Management | Our managed services team can take over the day-to-day management of your database infrastructure, freeing up your time to focus on other priorities.                |
+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Expert Consulting            | Our experienced consultants provide guidance on database topics like architecture design, migration planning, performance optimization, and security best practices. |
+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Comprehensive Training       | Our training programs help your team develop skills to manage databases effectively, offering virtual and in-person courses.                                          |
+------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------+

We're here to help you every step of the way. Whether you need a quick fix or a long-term partnership, we're ready to provide the expertise and support you need.
@@ -29,6 +29,7 @@ Getting Percona Toolkit
    :glob:
 
    installation
+   get-help
 
 Tools
 =====
@@ -529,7 +529,7 @@ list of supported platforms and versions.
 
 In order to support IPv6 addresses to connect to MySQL, Perl DBD::MySQL driver v4.033_01 is
 required. Also, as stated in RFC 3986 L<https://www.ietf.org/rfc/rfc3986.txt> section 3.2.2
-brackes must be used to distinguish host and port.
+brackets must be used to distinguish host and port.
 Examples: L<https://metacpan.org/pod/DBD::mysql#port>
 
 =head1 BUGS
@@ -1,6 +1,26 @@
 Release Notes
 ***************
 
+v3.7.0-2 released 2025-05-14
+==============================
+
+This release addresses multiple security vulnerabilities reported in Percona Toolkit version 3.7.0, including issues related to the `libxml2` component (CVE-2024-56171, CVE-2025-24928), `openssl` (CVE-2024-12797), and `krb5` (CVE-2022-37967).
+
+Bug Fixed
+------------
+
+* :jirabug:`PT-2442`: percona-toolkit:latest Vulnerability [CVE-2024-56171 CVE-2024-12797 CVE-2022-37967 CVE-2025-24928]
+* :jirabug:`PT-2375`: pt-table-sync does not work with generated columns (Thanks to Henning Pöttker for the contribution)
+* :jirabug:`PT-2400`: pt-table-checksum reports the error for warning code 4164
+* :jirabug:`PT-2377`: pt-table-sync does not handle utf8 strings in JSON columns correctly (Thanks to Henning Pöttker for the contribution)
+* :jirabug:`PT-2378`: pt-table-sync does print floating point numbers in SQL statements with insufficient precision (Thanks to Henning Pöttker for the contribution)
+* :jirabug:`PT-2389`: pt-online-schema-change resume functionality doesn't work with ADD/DROP column (Thanks to Perry Harrington for the contribution)
+* :jirabug:`PT-2410`: pt-archiver brokes when using output-format=csv and has null values (Thanks to Roberto de Bem for the contribution)
+* :jirabug:`PT-2422`: pt-online-schema-change race overwrites new_table_name (Thanks to Perry Harrington for the contribution)
+* :jirabug:`PT-2407`: pt-online-schema-change exit status(return code) is 0 even if it does NOT succeed
+* :jirabug:`PT-2355`: pt-online-schema-change should not resume a job with empty boundaries
+* :jirabug:`PT-1577`: pt-secure-collect, replace pt-secure-data in Usage section
+
 v3.7.0 released 2024-12-23
 ==============================
 
go.mod: 34 changes
@@ -1,17 +1,19 @@
 module github.com/percona/percona-toolkit
 
-go 1.23.4
+go 1.24.0
+
+toolchain go1.24.1
 
 require (
 	github.com/AlekSi/pointer v1.2.0
 	github.com/Ladicle/tabwriter v1.0.0
 	github.com/Masterminds/semver v1.5.0
 	github.com/alecthomas/kingpin v2.2.6+incompatible
-	github.com/alecthomas/kong v1.6.0
+	github.com/alecthomas/kong v1.11.0
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 	github.com/go-ini/ini v1.67.0
 	github.com/golang/mock v1.6.0
-	github.com/google/go-cmp v0.6.0
+	github.com/google/go-cmp v0.7.0
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/go-version v1.7.0
 	github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef
@@ -21,17 +23,17 @@ require (
 	github.com/pborman/getopt v1.1.0
 	github.com/percona/go-mysql v0.0.0-20210427141028-73d29c6da78c
 	github.com/pkg/errors v0.9.1
-	github.com/rs/zerolog v1.33.0
+	github.com/rs/zerolog v1.34.0
 	github.com/shirou/gopsutil v3.21.11+incompatible
 	github.com/sirupsen/logrus v1.9.3
 	github.com/stretchr/testify v1.10.0
 	github.com/xlab/treeprint v1.2.0
-	go.mongodb.org/mongo-driver v1.17.1
-	golang.org/x/crypto v0.31.0
+	go.mongodb.org/mongo-driver v1.17.4
+	golang.org/x/crypto v0.39.0
 	golang.org/x/exp v0.0.0-20230321023759-10a507213a29
 	gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.32.0
+	k8s.io/api v0.33.1
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 )
 
@@ -43,9 +45,8 @@ require (
 	github.com/go-ole/go-ole v1.2.6 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.16.3 // indirect
+	github.com/klauspost/compress v1.16.7 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
@@ -60,16 +61,17 @@ require (
 	github.com/xdg-go/stringprep v1.0.4 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
-	golang.org/x/net v0.33.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.28.0 // indirect
-	golang.org/x/term v0.27.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
+	golang.org/x/net v0.38.0 // indirect
+	golang.org/x/sync v0.15.0 // indirect
+	golang.org/x/sys v0.33.0 // indirect
+	golang.org/x/term v0.32.0 // indirect
+	golang.org/x/text v0.26.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apimachinery v0.32.0 // indirect
+	k8s.io/apimachinery v0.33.1 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
go.sum: 69 changes
@@ -8,8 +8,8 @@ github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8v
 github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
 github.com/alecthomas/kingpin v2.2.6+incompatible h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI=
 github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
-github.com/alecthomas/kong v1.6.0 h1:mwOzbdMR7uv2vul9J0FU3GYxE7ls/iX1ieMg5WIM6gE=
-github.com/alecthomas/kong v1.6.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
+github.com/alecthomas/kong v1.11.0 h1:y++1gI7jf8O7G7l4LZo5ASFhrhJvzc+WgF/arranEmM=
+github.com/alecthomas/kong v1.11.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
 github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
 github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
@@ -38,11 +38,9 @@ github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+Licev
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
@@ -55,8 +53,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
-github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -86,11 +84,11 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
-github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
+github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
 github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
@@ -125,14 +123,14 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
 github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM=
-go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4=
+go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
+go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
 golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
 golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -146,17 +144,15 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -172,18 +168,18 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
@@ -207,17 +203,20 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
|
||||
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
|
||||
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
|
||||
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw=
|
||||
k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw=
|
||||
k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4=
|
||||
k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
@@ -324,6 +324,16 @@ sub make_UPDATE {
      @cols = $self->sort_cols($row);
   }
   my $types = $self->{tbl_struct}->{type_for};

   # MySQL uses utf8mb4 for all strings in JSON, but
   # DBD::mysql does not decode it accordingly
   foreach my $col ( @cols ) {
      my $is_json = ($types->{$col} || '') =~ m/json/i;
      if ( $is_json && defined $row->{$col} ) {
         utf8::decode($row->{$col});
      }
   }

   return "UPDATE $self->{dst_db_tbl} SET "
      . join(', ', map {
         my $is_hex = ($types->{$_} || '') =~ m/^0x[0-9a-fA-F]+$/i;
@@ -403,6 +413,15 @@ sub make_row {
   my $q = $self->{Quoter};
   my $type_for = $self->{tbl_struct}->{type_for};

   # MySQL uses utf8mb4 for all strings in JSON, but
   # DBD::mysql does not decode it accordingly
   foreach my $col ( @cols ) {
      my $is_json = ($type_for->{$col} || '') =~ m/json/i;
      if ( $is_json && defined $row->{$col} ) {
         utf8::decode($row->{$col});
      }
   }

   return "$verb INTO $self->{dst_db_tbl}("
      . join(', ', map { $q->quote($_) } @cols)
      . ') VALUES ('
@@ -462,7 +481,8 @@ sub get_changes {


# Sub: sort_cols
#   Sort a row's columns based on their real order in the table.
#   Sort a row's columns based on their real order in the table, and remove
#   generated columns.
#   This requires that the optional tbl_struct arg was passed to <new()>.
#   If not, the rows are sorted alphabetically.
#
@@ -476,6 +496,7 @@ sub sort_cols {
   my @cols;
   if ( $self->{tbl_struct} ) {
      my $pos = $self->{tbl_struct}->{col_posn};
      my $is_generated = $self->{tbl_struct}->{is_generated};
      my @not_in_tbl;
      @cols = sort {
         $pos->{$a} <=> $pos->{$b}
@@ -489,6 +510,9 @@ sub sort_cols {
            1;
         }
      }
      grep {
         !$is_generated->{$_}
      }
      sort keys %$row;
      push @cols, @not_in_tbl if @not_in_tbl;
   }
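A minimal sketch, not part of the patch, of the same idea in Go: discover which columns are generated before building column lists, the way sort_cols() consults tbl_struct->{is_generated}. The DSN and table names here are hypothetical, and the driver import is an assumption.

package main

import (
	"database/sql"
	"fmt"
	"log"
	"strings"

	_ "github.com/go-sql-driver/mysql" // assumed driver
)

// generatedColumns returns the set of generated columns for a table; the
// EXTRA column carries "VIRTUAL GENERATED" or "STORED GENERATED" for them.
func generatedColumns(db *sql.DB, schema, table string) (map[string]bool, error) {
	rows, err := db.Query(
		`SELECT COLUMN_NAME, EXTRA
		   FROM information_schema.COLUMNS
		  WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ?`, schema, table)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	gen := make(map[string]bool)
	for rows.Next() {
		var name, extra string
		if err := rows.Scan(&name, &extra); err != nil {
			return nil, err
		}
		if strings.Contains(extra, "GENERATED") {
			gen[name] = true // skip these in INSERT/REPLACE/SET lists
		}
	}
	return gen, rows.Err()
}

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test") // hypothetical DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	fmt.Println(generatedColumns(db, "test", "pt-2375"))
}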
@@ -56,7 +56,7 @@ sub check_type_constraints {
          . (defined $val ? Lmo::Dumper($val) : 'undef') )
}

# Nested (or parametized) constraints look like this: ArrayRef[CONSTRAINT] or
# Nested (or parameterized) constraints look like this: ArrayRef[CONSTRAINT] or
# Maybe[CONSTRAINT]. This function returns a coderef that implements one of
# these.
sub _nested_constraints {
@@ -64,7 +64,7 @@ sub _nested_constraints {

   my $inner_types;
   if ( $type =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) {
      # If the inner constraint -- the part within brackets -- is also a parametized
      # If the inner constraint -- the part within brackets -- is also a parameterized
      # constraint, then call this function recursively.
      $inner_types = _nested_constraints($1, $2);
   }
@@ -111,11 +111,30 @@ sub _parse_config {
   }
   elsif ( my $dbh = $args{dbh} ) {
      $config_data{format} = $args{format} || 'show_variables';
      my $mysql_version = _get_version($dbh);
      my $sql = "SHOW /*!40103 GLOBAL*/ VARIABLES";
      PTDEBUG && _d($dbh, $sql);
      my $rows = $dbh->selectall_arrayref($sql);
      $config_data{vars} = { map { @$_ } @$rows };
      $config_data{mysql_version} = _get_version($dbh);
      $config_data{vars} = {
         map {
            my ($variable, $value) = @$_;
            # Starting from MySQL 5.7.6, SHOW VARIABLES retrieves records from
            # the performance_schema table named GLOBAL_VARIABLES. This table
            # stores variable values in a VARCHAR(1024) column, meaning longer
            # values may be truncated. However, the full value can still be
            # retrieved by accessing the variable with SELECT @@GLOBAL.
            # https://dev.mysql.com/doc/refman/5.7/en/information-schema-variables-table.html
            if ( length($value) == 1024 && $mysql_version ge '5.7.0' ) {
               my $var_sql = "SELECT \@\@global.$variable";
               PTDEBUG && _d($dbh, $var_sql);
               my $var_sth = $dbh->prepare($var_sql);
               $var_sth->execute();
               ($value) = $var_sth->fetchrow_array();
            }
            $variable => $value
         } @$rows
      };
      $config_data{mysql_version} = $mysql_version;
   }
   else {
      die "Unknown config source";
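A minimal Go sketch of the same workaround, assuming an open *sql.DB named db; this is an illustration, not part of the patch:

// globalVariables re-reads any value that hits the 1024-character cap that
// SHOW VARIABLES inherits from performance_schema on MySQL >= 5.7.6.
func globalVariables(db *sql.DB) (map[string]string, error) {
	rows, err := db.Query("SHOW GLOBAL VARIABLES")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	vars := make(map[string]string)
	for rows.Next() {
		var name, value string
		if err := rows.Scan(&name, &value); err != nil {
			return nil, err
		}
		if len(value) == 1024 {
			// Possibly truncated; SELECT @@GLOBAL.<name> returns the full
			// value. <name> comes from the server itself, so interpolating
			// it is safe here.
			if err := db.QueryRow("SELECT @@GLOBAL." + name).Scan(&value); err != nil {
				return nil, err
			}
		}
		vars[name] = value
	}
	return vars, rows.Err()
}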
@@ -208,10 +208,6 @@ sub _nibble_params {
   );
   PTDEBUG && _d('Ascend params:', Dumper($asc));

   # Check if enum fields items are sorted or not.
   # If they are sorted we can skip adding CONCAT to improve the queries eficiency.
   my $force_concat_enums;

   # Make SQL statements, prepared on first call to next(). FROM and
   # ORDER BY are the same for all statements. FORCE IDNEX and ORDER BY
   # are needed to ensure deterministic nibbling.
@@ -507,7 +507,7 @@ sub find {
      }
      my $reason = 'Exceeds busy time';
      PTDEBUG && _d($reason);
      # Saving the reasons for each query in the objct is a bit nasty,
      # Saving the reasons for each query in the object is a bit nasty,
      # but the alternatives are worse:
      # - Saving internal data in the query
      # - Instead of using the stringified hashref as a key, using
@@ -78,7 +78,10 @@ sub quote_val {
      && !$args{is_char}; # unless is_char is true

   # https://bugs.launchpad.net/percona-toolkit/+bug/1229861
   return $val if $args{is_float};
   if ( $args{is_float} ) {
      return sprintf("%.17g", $val) if $val - "$val" != 0;
      return $val;
   }

   # Quote and return non-numeric vals.
   $val =~ s/(['\\])/\\$1/g;
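As a hedged aside, the same precision rule is easy to reproduce in Go, whose strconv mirrors the "%.17g" contract used above (17 significant digits always round-trip an IEEE-754 double):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := 969.1 / 360
	// 17 significant digits always round-trip a float64, which is what
	// the Perl sprintf("%.17g", ...) above relies on.
	fmt.Println(strconv.FormatFloat(v, 'g', 17, 64)) // 2.6919444444444447
	// The shortest form that still round-trips prints "0.1", matching the
	// patch's "full precision only when required" behaviour.
	fmt.Println(strconv.FormatFloat(0.1, 'g', -1, 64))
}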
@@ -1140,7 +1140,7 @@ sub timestampdiff {
# Some column types can store invalid values, like most of the temporal
# types. When evaluated, invalid values return NULL. If the value is
# NULL to begin with, then it is not invalid because NULL is valid.
# For example, TO_DAYS('2009-00-00') evalues to NULL because that date
# For example, TO_DAYS('2009-00-00') evaluates to NULL because that date
# is invalid, even though it's storable.
sub get_valid_end_points {
   my ( $self, %args ) = @_;
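The same behavior is easy to observe from a client. A hedged Go fragment, assuming an open *sql.DB named db against MySQL:

var days sql.NullInt64
if err := db.QueryRow("SELECT TO_DAYS('2009-00-00')").Scan(&days); err != nil {
	log.Fatal(err)
}
// days.Valid is false: the invalid (but storable) date evaluated to NULL.
fmt.Println(days.Valid)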
@@ -181,7 +181,7 @@ sub version_check {
   }

   # Always update the vc file, even if the version check fails.
   if ( @$instances_to_check ) {
   if ( $instances_to_check and @$instances_to_check ) {
      eval {
         # Update the check time for things we checked. I.e. if we
         # didn't check it, do _not_ update its time.
@@ -14,8 +14,8 @@ sphinxcontrib-srclinks
sphinx-tabs

certifi>=2024.7.4 # not directly required, pinned by Snyk to avoid a vulnerability
jinja2>=3.1.4 # not directly required, pinned by Snyk to avoid a vulnerability
jinja2>=3.1.6 # not directly required, pinned by Snyk to avoid a vulnerability
pygments>=2.15.0 # not directly required, pinned by Snyk to avoid a vulnerability
requests>=2.31.0 # not directly required, pinned by Snyk to avoid a vulnerability
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
requests>=2.32.4 # not directly required, pinned by Snyk to avoid a vulnerability
setuptools>=78.1.1 # not directly required, pinned by Snyk to avoid a vulnerability
idna>=3.7 # not directly required, pinned by Snyk to avoid a vulnerability
@@ -10,11 +10,12 @@ import (
	"strings"
	"testing"

	"github.com/percona/percona-toolkit/src/go/lib/tutil"
	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.mongodb.org/mongo-driver/bson"

	"github.com/percona/percona-toolkit/src/go/lib/tutil"
	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
)

const (
@@ -33,7 +34,8 @@ func TestMain(m *testing.M) {
		log.Printf("cannot get root path: %s", err.Error())
		os.Exit(1)
	}
	os.Exit(m.Run())
	code := m.Run()
	os.Exit(code)
}

func TestSingleFingerprint(t *testing.T) {
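The os.Exit(m.Run()) rewrite matters because os.Exit terminates the process without running deferred functions; capturing the code first leaves room for cleanup. A sketch of the pattern this enables (setup and teardown are hypothetical helpers, not functions from this repo):

func TestMain(m *testing.M) {
	if err := setup(); err != nil { // hypothetical fixture setup
		log.Printf("setup failed: %s", err)
		os.Exit(1)
	}
	code := m.Run()
	teardown() // hypothetical; os.Exit(m.Run()) would have skipped this
	os.Exit(code)
}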
@@ -7,14 +7,15 @@ import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.mongodb.org/mongo-driver/bson/primitive"

	tu "github.com/percona/percona-toolkit/src/go/internal/testutils"
	"github.com/percona/percona-toolkit/src/go/lib/tutil"
	"github.com/percona/percona-toolkit/src/go/mongolib/fingerprinter"
	"github.com/percona/percona-toolkit/src/go/mongolib/stats"
	"github.com/percona/percona-toolkit/src/go/pt-mongodb-query-digest/filter"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

const (
@@ -38,7 +39,8 @@ func TestMain(m *testing.M) {
		log.Printf("cannot get root path: %s", err.Error())
		os.Exit(1)
	}
	os.Exit(m.Run())
	code := m.Run()
	os.Exit(code)
}

func TestRegularIterator(t *testing.T) {
@@ -8,85 +8,104 @@ import (
)

// docsExamined is renamed from nscannedObjects in 3.2.0
// json tags are used for PMM purposes
// https://docs.mongodb.com/manual/reference/database-profiler/#system.profile.docsExamined
type SystemProfile struct {
AllUsers []interface{} `bson:"allUsers"`
Client string `bson:"client"`
CursorExhausted bool `bson:"cursorExhausted"`
DocsExamined int `bson:"docsExamined"`
NscannedObjects int `bson:"nscannedObjects"`
AllUsers []interface{} `bson:"allUsers" json:"allUsers"`
Client string `bson:"client" json:"client"`
CursorExhausted bool `bson:"cursorExhausted" json:"cursorExhausted"`
ExecStats struct {
Advanced int `bson:"advanced"`
ExecutionTimeMillisEstimate int `bson:"executionTimeMillisEstimate"`
Advanced int `bson:"advanced" json:"advanced"`
ExecutionTimeMillisEstimate int `bson:"executionTimeMillisEstimate" json:"executionTimeMillisEstimate"`
InputStage struct {
Advanced int `bson:"advanced"`
Direction string `bson:"direction"`
DocsExamined int `bson:"docsExamined"`
ExecutionTimeMillisEstimate int `bson:"executionTimeMillisEstimate"`
Advanced int `bson:"advanced" json:"advanced"`
Direction string `bson:"direction" json:"direction"`
DocsExamined int `bson:"docsExamined" json:"docsExamined"`
ExecutionTimeMillisEstimate int `bson:"executionTimeMillisEstimate" json:"executionTimeMillisEstimate"`
Filter struct {
Date struct {
Eq string `bson:"$eq"`
} `bson:"date"`
} `bson:"filter"`
Invalidates int `bson:"invalidates"`
IsEOF int `bson:"isEOF"`
NReturned int `bson:"nReturned"`
NeedTime int `bson:"needTime"`
NeedYield int `bson:"needYield"`
RestoreState int `bson:"restoreState"`
SaveState int `bson:"saveState"`
Stage string `bson:"stage"`
Works int `bson:"works"`
} `bson:"inputStage"`
Invalidates int `bson:"invalidates"`
IsEOF int `bson:"isEOF"`
LimitAmount int `bson:"limitAmount"`
NReturned int `bson:"nReturned"`
NeedTime int `bson:"needTime"`
NeedYield int `bson:"needYield"`
RestoreState int `bson:"restoreState"`
SaveState int `bson:"saveState"`
Stage string `bson:"stage"`
Works int `bson:"works"`
} `bson:"execStats"`
KeyUpdates int `bson:"keyUpdates"`
KeysExamined int `bson:"keysExamined"`
Eq string `bson:"$eq" json:"$eq"`
} `bson:"date" json:"date"`
} `bson:"filter" json:"filter"`
Invalidates int `bson:"invalidates" json:"invalidates"`
IsEOF int `bson:"isEOF" json:"isEOF"`
NReturned int `bson:"nReturned" json:"nReturned"`
NeedTime int `bson:"needTime" json:"needTime"`
NeedYield int `bson:"needYield" json:"needYield"`
RestoreState int `bson:"restoreState" json:"restoreState"`
SaveState int `bson:"saveState" json:"saveState"`
Stage string `bson:"stage" json:"stage"`
Works int `bson:"works" json:"works"`
} `bson:"inputStage" json:"inputStage"`
Invalidates int `bson:"invalidates" json:"invalidates"`
IsEOF int `bson:"isEOF" json:"isEOF"`
LimitAmount int `bson:"limitAmount" json:"limitAmount"`
NReturned int `bson:"nReturned" json:"nReturned"`
NeedTime int `bson:"needTime" json:"needTime"`
NeedYield int `bson:"needYield" json:"needYield"`
RestoreState int `bson:"restoreState" json:"restoreState"`
SaveState int `bson:"saveState" json:"saveState"`
Stage string `bson:"stage" json:"stage"`
Works int `bson:"works" json:"works"`
DocsExamined int `bson:"docsExamined" json:"docsExamined"`
} `bson:"execStats" json:"execStats"`
KeyUpdates int `bson:"keyUpdates" json:"keyUpdates"`
KeysExamined int `bson:"keysExamined" json:"keysExamined"`
Locks struct {
Collection struct {
AcquireCount struct {
R int `bson:"R"`
} `bson:"acquireCount"`
} `bson:"Collection"`
Read int `bson:"R" json:"R"`
ReadShared int `bson:"r" json:"r"`
} `bson:"acquireCount" json:"acquireCount"`
} `bson:"Collection" json:"Collection"`
Database struct {
AcquireCount struct {
R int `bson:"r"`
} `bson:"acquireCount"`
} `bson:"Database"`
ReadShared int `bson:"r" json:"r"`
} `bson:"acquireCount" json:"acquireCount"`
AcquireWaitCount struct {
ReadShared int `bson:"r" json:"r"`
} `bson:"acquireWaitCount" json:"acquireWaitCount"`
TimeAcquiringMicros struct {
ReadShared int64 `bson:"r" json:"r"`
} `bson:"timeAcquiringMicros" json:"timeAcquiringMicros"`
} `bson:"Database" json:"Database"`
Global struct {
AcquireCount struct {
R int `bson:"r"`
} `bson:"acquireCount"`
} `bson:"Global"`
ReadShared int `bson:"r" json:"r"`
WriteShared int `bson:"w" json:"w"`
} `bson:"acquireCount" json:"acquireCount"`
} `bson:"Global" json:"Global"`
MMAPV1Journal struct {
AcquireCount struct {
R int `bson:"r"`
} `bson:"acquireCount"`
} `bson:"MMAPV1Journal"`
} `bson:"locks"`
Millis int `bson:"millis"`
Nreturned int `bson:"nreturned"`
Ns string `bson:"ns"`
NumYield int `bson:"numYield"`
Op string `bson:"op"`
Protocol string `bson:"protocol"`
Query bson.D `bson:"query"`
UpdateObj bson.D `bson:"updateobj"`
Command bson.D `bson:"command"`
OriginatingCommand bson.D `bson:"originatingCommand"`
ResponseLength int `bson:"responseLength"`
Ts time.Time `bson:"ts"`
User string `bson:"user"`
WriteConflicts int `bson:"writeConflicts"`
ReadShared int `bson:"r" json:"r"`
} `bson:"acquireCount" json:"acquireCount"`
} `bson:"MMAPV1Journal" json:"MMAPV1Journal"`
} `bson:"locks" json:"locks"`
Millis int `bson:"millis" json:"durationMillis"`
Nreturned int `bson:"nreturned" json:"nreturned"`
Ns string `bson:"ns" json:"ns"`
NumYield int `bson:"numYield" json:"numYield"`
Op string `bson:"op" json:"op"`
PlanSummary string `bson:"planSummary" json:"planSummary"`
Protocol string `bson:"protocol" json:"protocol"`
Query bson.D `bson:"query" json:"query"`
UpdateObj bson.D `bson:"updateobj" json:"updateobj"`
Command bson.D `bson:"command" json:"command"`
OriginatingCommand bson.D `bson:"originatingCommand" json:"originatingCommand"`
ResponseLength int `bson:"responseLength" json:"reslen"`
Ts time.Time `bson:"ts" json:"ts"`
User string `bson:"user" json:"user"`
WriteConflicts int `bson:"writeConflicts" json:"writeConflicts"`
DocsExamined int `bson:"docsExamined" json:"docsExamined"`
QueryHash string `bson:"queryHash" json:"queryHash"`
Storage struct {
Data struct {
BytesRead int64 `bson:"bytesRead" json:"bytesRead"`
TimeReadingMicros int64 `bson:"timeReadingMicros" json:"timeReadingMicros"`
} `bson:"data" json:"data"`
} `bson:"storage" json:"storage"`
AppName string `bson:"appName" json:"appName"`
Comments string `bson:"comments" json:"comments"`
}

func NewExampleQuery(doc SystemProfile) ExampleQuery {
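A small, self-contained illustration of why the struct carries both tag sets: the bson names follow the profiler document, while the json names (note durationMillis and reslen) control what PMM-style consumers see. The struct below is a toy subset, not the real type:

package main

import (
	"encoding/json"
	"fmt"
)

type profileDoc struct {
	Millis         int `bson:"millis" json:"durationMillis"`
	ResponseLength int `bson:"responseLength" json:"reslen"`
}

func main() {
	b, _ := json.Marshal(profileDoc{Millis: 7, ResponseLength: 215})
	fmt.Println(string(b)) // {"durationMillis":7,"reslen":215}
}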
@@ -4,6 +4,7 @@ import (
"crypto/md5"
"fmt"
"sort"
"strings"
"sync"
"time"

@@ -13,6 +14,10 @@ import (
"github.com/percona/percona-toolkit/src/go/mongolib/proto"
)

const (
planSummaryCollScan = "COLLSCAN"
)

type StatsError struct {
error
}
@@ -86,18 +91,23 @@ func (s *Stats) Add(doc proto.SystemProfile) error {
Namespace: fp.Namespace,
TableScan: false,
Query: string(queryBson),
PlanSummary: doc.PlanSummary,
QueryHash: doc.QueryHash,
AppName: doc.AppName,
Client: doc.Client,
User: strings.Split(doc.User, "@")[0],
Comments: doc.Comments,
}
s.setQueryInfoAndCounters(key, qiac)
}
qiac.Count++
// docsExamined is renamed from nscannedObjects in 3.2.0.
// https://docs.mongodb.com/manual/reference/database-profiler/#system.profile.docsExamined
s.Lock()
if doc.NscannedObjects > 0 {
qiac.NScanned = append(qiac.NScanned, float64(doc.NscannedObjects))
} else {
qiac.NScanned = append(qiac.NScanned, float64(doc.DocsExamined))
if qiac.PlanSummary == planSummaryCollScan {
qiac.CollScanCount++
}

qiac.PlanSummary = strings.Split(qiac.PlanSummary, " ")[0]

qiac.NReturned = append(qiac.NReturned, float64(doc.Nreturned))
qiac.QueryTime = append(qiac.QueryTime, float64(doc.Millis))
qiac.ResponseLength = append(qiac.ResponseLength, float64(doc.ResponseLength))
@@ -107,6 +117,42 @@ func (s *Stats) Add(doc proto.SystemProfile) error {
if qiac.LastSeen.IsZero() || qiac.LastSeen.Before(doc.Ts) {
qiac.LastSeen = doc.Ts
}

if doc.DocsExamined > 0 {
qiac.DocsExamined = append(qiac.DocsExamined, float64(doc.DocsExamined))
}
if doc.KeysExamined > 0 {
qiac.KeysExamined = append(qiac.KeysExamined, float64(doc.KeysExamined))
}
if doc.Locks.Global.AcquireCount.ReadShared > 0 {
qiac.LocksGlobalAcquireCountReadSharedCount++
qiac.LocksGlobalAcquireCountReadShared += doc.Locks.Global.AcquireCount.ReadShared
}
if doc.Locks.Global.AcquireCount.WriteShared > 0 {
qiac.LocksGlobalAcquireCountWriteSharedCount++
qiac.LocksGlobalAcquireCountWriteShared += doc.Locks.Global.AcquireCount.WriteShared
}
if doc.Locks.Database.AcquireCount.ReadShared > 0 {
qiac.LocksDatabaseAcquireCountReadSharedCount++
qiac.LocksDatabaseAcquireCountReadShared += doc.Locks.Database.AcquireCount.ReadShared
}
if doc.Locks.Database.AcquireWaitCount.ReadShared > 0 {
qiac.LocksDatabaseAcquireWaitCountReadSharedCount++
qiac.LocksDatabaseAcquireWaitCountReadShared += doc.Locks.Database.AcquireWaitCount.ReadShared
}
if doc.Locks.Database.TimeAcquiringMicros.ReadShared > 0 {
qiac.LocksDatabaseTimeAcquiringMicrosReadShared = append(qiac.LocksDatabaseTimeAcquiringMicrosReadShared, float64(doc.Locks.Database.TimeAcquiringMicros.ReadShared))
}
if doc.Locks.Collection.AcquireCount.ReadShared > 0 {
qiac.LocksCollectionAcquireCountReadSharedCount++
qiac.LocksCollectionAcquireCountReadShared += doc.Locks.Collection.AcquireCount.ReadShared
}
if doc.Storage.Data.BytesRead > 0 {
qiac.StorageBytesRead = append(qiac.StorageBytesRead, float64(doc.Storage.Data.BytesRead))
}
if doc.Storage.Data.TimeReadingMicros > 0 {
qiac.StorageTimeReadingMicros = append(qiac.StorageTimeReadingMicros, float64(doc.Storage.Data.TimeReadingMicros))
}
s.Unlock()

return nil
@@ -185,9 +231,34 @@ type QueryInfoAndCounters struct {
BlockedTime Times
LockTime Times
NReturned []float64
NScanned []float64
QueryTime []float64 // in milliseconds
ResponseLength []float64

PlanSummary string
CollScanCount int

DocsExamined []float64
KeysExamined []float64
QueryHash string
AppName string
Client string
User string
Comments string

LocksGlobalAcquireCountReadSharedCount int
LocksGlobalAcquireCountReadShared int
LocksGlobalAcquireCountWriteSharedCount int
LocksGlobalAcquireCountWriteShared int
LocksDatabaseAcquireCountReadSharedCount int
LocksDatabaseAcquireCountReadShared int
LocksDatabaseAcquireWaitCountReadSharedCount int
LocksDatabaseAcquireWaitCountReadShared int
LocksDatabaseTimeAcquiringMicrosReadShared []float64 // in microseconds
LocksCollectionAcquireCountReadSharedCount int
LocksCollectionAcquireCountReadShared int

StorageBytesRead []float64
StorageTimeReadingMicros []float64 // in microseconds
}

// times is an array of time.Time that implements the Sorter interface
@@ -214,11 +285,15 @@ func (g GroupKey) String() string {
}

type totalCounters struct {
Count int
Scanned float64
Returned float64
QueryTime float64
Bytes float64
Count int
Returned float64
QueryTime float64
Bytes float64
DocsExamined float64
KeysExamined float64
LocksDatabaseTimeAcquiringMicrosReadShared float64
StorageBytesRead float64
StorageTimeReadingMicros float64
}

type QueryStats struct {
@@ -230,14 +305,44 @@ type QueryStats struct {
FirstSeen time.Time
LastSeen time.Time

Count int
QPS float64
Rank int
Ratio float64
QueryTime Statistics
ResponseLength Statistics
Returned Statistics
Scanned Statistics
Count int
QPS float64
Rank int
Ratio float64
QueryTime Statistics
ResponseLengthCount int
ResponseLength Statistics
Returned Statistics

PlanSummary string
CollScanCount int
DocsExaminedCount int
DocsExamined Statistics
KeysExaminedCount int
KeysExamined Statistics
QueryHash string
AppName string
Client string
User string
Comments string

LocksGlobalAcquireCountReadSharedCount int
LocksGlobalAcquireCountReadShared int
LocksGlobalAcquireCountWriteSharedCount int
LocksGlobalAcquireCountWriteShared int
LocksDatabaseAcquireCountReadSharedCount int
LocksDatabaseAcquireCountReadShared int
LocksDatabaseAcquireWaitCountReadSharedCount int
LocksDatabaseAcquireWaitCountReadShared int
LocksDatabaseTimeAcquiringMicrosReadSharedCount int
LocksDatabaseTimeAcquiringMicrosReadShared Statistics // in microseconds
LocksCollectionAcquireCountReadSharedCount int
LocksCollectionAcquireCountReadShared int

StorageBytesReadCount int
StorageBytesRead Statistics
StorageTimeReadingMicrosCount int
StorageTimeReadingMicros Statistics // in microseconds
}

type Statistics struct {
@@ -254,22 +359,46 @@ type Statistics struct {

func countersToStats(query QueryInfoAndCounters, uptime int64, tc totalCounters) QueryStats {
queryStats := QueryStats{
Count: query.Count,
ID: query.ID,
Operation: query.Operation,
Query: query.Query,
Fingerprint: query.Fingerprint,
Scanned: calcStats(query.NScanned),
Returned: calcStats(query.NReturned),
QueryTime: calcStats(query.QueryTime),
ResponseLength: calcStats(query.ResponseLength),
FirstSeen: query.FirstSeen,
LastSeen: query.LastSeen,
Namespace: query.Namespace,
QPS: float64(query.Count) / float64(uptime),
}
if tc.Scanned > 0 {
queryStats.Scanned.Pct = queryStats.Scanned.Total * 100 / tc.Scanned
Count: query.Count,
ID: query.ID,
Operation: query.Operation,
Query: query.Query,
Fingerprint: query.Fingerprint,
Returned: calcStats(query.NReturned),
QueryTime: calcStats(query.QueryTime),
FirstSeen: query.FirstSeen,
LastSeen: query.LastSeen,
Namespace: query.Namespace,
QPS: float64(query.Count) / float64(uptime),
PlanSummary: query.PlanSummary,
CollScanCount: query.CollScanCount,
ResponseLengthCount: len(query.ResponseLength),
ResponseLength: calcStats(query.ResponseLength),
DocsExaminedCount: len(query.DocsExamined),
DocsExamined: calcStats(query.DocsExamined),
KeysExaminedCount: len(query.KeysExamined),
KeysExamined: calcStats(query.KeysExamined),
QueryHash: query.QueryHash,
AppName: query.AppName,
Client: query.Client,
User: query.User,
Comments: query.Comments,
LocksGlobalAcquireCountReadSharedCount: query.LocksGlobalAcquireCountReadSharedCount,
LocksGlobalAcquireCountReadShared: query.LocksGlobalAcquireCountReadShared,
LocksGlobalAcquireCountWriteSharedCount: query.LocksGlobalAcquireCountWriteSharedCount,
LocksGlobalAcquireCountWriteShared: query.LocksGlobalAcquireCountWriteShared,
LocksDatabaseAcquireCountReadSharedCount: query.LocksDatabaseAcquireCountReadSharedCount,
LocksDatabaseAcquireCountReadShared: query.LocksDatabaseAcquireCountReadShared,
LocksDatabaseAcquireWaitCountReadSharedCount: query.LocksDatabaseAcquireWaitCountReadSharedCount,
LocksDatabaseAcquireWaitCountReadShared: query.LocksDatabaseAcquireWaitCountReadShared,
LocksDatabaseTimeAcquiringMicrosReadSharedCount: len(query.LocksDatabaseTimeAcquiringMicrosReadShared),
LocksDatabaseTimeAcquiringMicrosReadShared: calcStats(query.LocksDatabaseTimeAcquiringMicrosReadShared),
LocksCollectionAcquireCountReadSharedCount: query.LocksCollectionAcquireCountReadSharedCount,
LocksCollectionAcquireCountReadShared: query.LocksCollectionAcquireCountReadShared,
StorageBytesReadCount: len(query.StorageBytesRead),
StorageBytesRead: calcStats(query.StorageBytesRead),
StorageTimeReadingMicrosCount: len(query.StorageTimeReadingMicros),
StorageTimeReadingMicros: calcStats(query.StorageTimeReadingMicros),
}
if tc.Returned > 0 {
queryStats.Returned.Pct = queryStats.Returned.Total * 100 / tc.Returned
@@ -281,7 +410,22 @@ func countersToStats(query QueryInfoAndCounters, uptime int64, tc totalCounters)
queryStats.ResponseLength.Pct = queryStats.ResponseLength.Total * 100 / tc.Bytes
}
if queryStats.Returned.Total > 0 {
queryStats.Ratio = queryStats.Scanned.Total / queryStats.Returned.Total
queryStats.Ratio = queryStats.DocsExamined.Total / queryStats.Returned.Total
}
if tc.DocsExamined > 0 {
queryStats.DocsExamined.Pct = queryStats.DocsExamined.Total * 100 / tc.DocsExamined
}
if tc.KeysExamined > 0 {
queryStats.KeysExamined.Pct = queryStats.KeysExamined.Total * 100 / tc.KeysExamined
}
if tc.LocksDatabaseTimeAcquiringMicrosReadShared > 0 {
queryStats.LocksDatabaseTimeAcquiringMicrosReadShared.Pct = queryStats.LocksDatabaseTimeAcquiringMicrosReadShared.Total * 100 / tc.LocksDatabaseTimeAcquiringMicrosReadShared
}
if tc.StorageBytesRead > 0 {
queryStats.StorageBytesRead.Pct = queryStats.StorageBytesRead.Total * 100 / tc.StorageBytesRead
}
if tc.StorageTimeReadingMicros > 0 {
queryStats.StorageTimeReadingMicros.Pct = queryStats.StorageTimeReadingMicros.Total * 100 / tc.StorageTimeReadingMicros
}

return queryStats
@@ -291,10 +435,14 @@ func aggregateCounters(queries []QueryInfoAndCounters) QueryInfoAndCounters {
qt := QueryInfoAndCounters{}
for _, query := range queries {
qt.Count += query.Count
qt.NScanned = append(qt.NScanned, query.NScanned...)
qt.NReturned = append(qt.NReturned, query.NReturned...)
qt.QueryTime = append(qt.QueryTime, query.QueryTime...)
qt.ResponseLength = append(qt.ResponseLength, query.ResponseLength...)
qt.DocsExamined = append(qt.DocsExamined, query.DocsExamined...)
qt.KeysExamined = append(qt.KeysExamined, query.KeysExamined...)
qt.LocksDatabaseTimeAcquiringMicrosReadShared = append(qt.LocksDatabaseTimeAcquiringMicrosReadShared, query.LocksDatabaseTimeAcquiringMicrosReadShared...)
qt.StorageBytesRead = append(qt.StorageBytesRead, query.StorageBytesRead...)
qt.StorageTimeReadingMicros = append(qt.StorageTimeReadingMicros, query.StorageTimeReadingMicros...)
}
return qt
}
@@ -305,9 +453,6 @@ func calcTotalCounters(queries []QueryInfoAndCounters) totalCounters {
for _, query := range queries {
tc.Count += query.Count

scanned, _ := stats.Sum(query.NScanned)
tc.Scanned += scanned

returned, _ := stats.Sum(query.NReturned)
tc.Returned += returned

@@ -316,11 +461,30 @@ func calcTotalCounters(queries []QueryInfoAndCounters) totalCounters {

bytes, _ := stats.Sum(query.ResponseLength)
tc.Bytes += bytes

docsExamined, _ := stats.Sum(query.DocsExamined)
tc.DocsExamined += docsExamined

keysExamined, _ := stats.Sum(query.KeysExamined)
tc.KeysExamined += keysExamined

locksDatabaseTimeAcquiringMicrosReadShared, _ := stats.Sum(query.LocksDatabaseTimeAcquiringMicrosReadShared)
tc.LocksDatabaseTimeAcquiringMicrosReadShared += locksDatabaseTimeAcquiringMicrosReadShared

storageBytesRead, _ := stats.Sum(query.StorageBytesRead)
tc.StorageBytesRead += storageBytesRead

storageTimeReadingMicros, _ := stats.Sum(query.StorageTimeReadingMicros)
tc.StorageTimeReadingMicros += storageTimeReadingMicros
}
return tc
}

func calcStats(samples []float64) Statistics {
if len(samples) == 0 {
return Statistics{}
}

var s Statistics
s.Total, _ = stats.Sum(samples)
s.Min, _ = stats.Min(samples)
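calcStats reduces each sample slice with the stats package; assuming that package is github.com/montanaflynn/stats (which matches the Sum and Min calls shown), the whole Statistics row can be derived like this sketch, inside any function with samples in scope:

total, _ := stats.Sum(samples)
min, _ := stats.Min(samples)
max, _ := stats.Max(samples)
avg, _ := stats.Mean(samples)
p95, _ := stats.Percentile(samples, 95)
stddev, _ := stats.StandardDeviation(samples)
median, _ := stats.Median(samples)
fmt.Println(total, min, max, avg, p95, stddev, median)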
@@ -14,6 +14,7 @@ import (
	"time"

	"github.com/golang/mock/gomock"

	"github.com/percona/percona-toolkit/src/go/lib/tutil"
	"github.com/percona/percona-toolkit/src/go/mongolib/fingerprinter"
	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
@@ -40,8 +41,8 @@ func TestMain(m *testing.M) {
		log.Printf("cannot get root path: %s", err.Error())
		os.Exit(1)
	}
	// TODO: Review with the new sandbox
	// os.Exit(m.Run())
	code := m.Run()
	os.Exit(code)
}

func TestTimesLen(t *testing.T) {
@@ -158,9 +159,9 @@ func TestStats(t *testing.T) {
BlockedTime: nil,
LockTime: nil,
NReturned: []float64{0},
NScanned: []float64{10000},
QueryTime: []float64{7},
ResponseLength: []float64{215},
DocsExamined: []float64{10000},
}

want := Queries{
@@ -28,31 +28,33 @@ type sslSecret struct {

// Dumper struct is for dumping cluster
type Dumper struct {
cmd string
kubeconfig string
resources []string
filePaths []string
fileContainer string
namespace string
location string
errors string
mode int64
crType string
forwardport string
sslSecrets []sslSecret
cmd string
kubeconfig string
resources []string
filePaths []string
fileContainer string
namespace string
location string
errors string
mode int64
crType string
forwardport string
sslSecrets []sslSecret
skipPodSummary bool
}

var resourcesRe = regexp.MustCompile(`(\w+\.(\w+).percona\.com)`)

// New return new Dumper object
func New(location, namespace, resource string, kubeconfig string, forwardport string) Dumper {
func New(location, namespace, resource string, kubeconfig string, forwardport string, skipPodSummary bool) Dumper {
d := Dumper{
cmd: "kubectl",
kubeconfig: kubeconfig,
location: "cluster-dump",
mode: int64(0o777),
namespace: namespace,
forwardport: forwardport,
cmd: "kubectl",
kubeconfig: kubeconfig,
location: "cluster-dump",
mode: int64(0o777),
namespace: namespace,
forwardport: forwardport,
skipPodSummary: skipPodSummary,
}
resources := []string{
"pods",
@@ -352,18 +354,20 @@ func (d *Dumper) DumpCluster() error {
crName = pod.Labels["app.kubernetes.io/instance"]
}
// Get summary
output, err = d.getPodSummary(resourceType(d.crType), pod.Name, crName, ns.Name)
if err != nil {
d.logError(err.Error(), d.crType, pod.Name)
err = addToArchive(location, d.mode, []byte(err.Error()), tw)
if !d.skipPodSummary {
output, err = d.getPodSummary(resourceType(d.crType), pod.Name, crName, ns.Name)
if err != nil {
log.Printf("Error: create summary errors archive for pod %s in namespace %s: %v", pod.Name, ns.Name, err)
}
} else {
err = addToArchive(location, d.mode, output, tw)
if err != nil {
d.logError(err.Error(), "create summary archive for pod "+pod.Name)
log.Printf("Error: create summary archive for pod %s: %v", pod.Name, err)
d.logError(err.Error(), d.crType, pod.Name)
err = addToArchive(location, d.mode, []byte(err.Error()), tw)
if err != nil {
log.Printf("Error: create summary errors archive for pod %s in namespace %s: %v", pod.Name, ns.Name, err)
}
} else {
err = addToArchive(location, d.mode, output, tw)
if err != nil {
d.logError(err.Error(), "create summary archive for pod "+pod.Name)
log.Printf("Error: create summary archive for pod %s: %v", pod.Name, err)
}
}
}

@@ -28,6 +28,7 @@ func main() {
kubeconfig := ""
forwardport := ""
version := false
skipPodSummary := false

flag.StringVar(&namespace, "namespace", "", "Namespace for collecting data. If empty data will be collected from all namespaces")
flag.StringVar(&resource, "resource", "auto", "Collect data, specific to the resource. Supported values: pxc, psmdb, pg, pgv2, ps, none, auto")
@@ -35,6 +36,7 @@ func main() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to kubeconfig")
flag.StringVar(&forwardport, "forwardport", "", "Port to use for port forwarding")
flag.BoolVar(&version, "version", false, "Print version")
flag.BoolVar(&skipPodSummary, "skip-pod-summary", false, "Skip pod summary collection")
flag.Parse()

if version {
@@ -50,7 +52,7 @@ func main() {
resource += "/" + clusterName
}

d := dumper.New("", namespace, resource, kubeconfig, forwardport)
d := dumper.New("", namespace, resource, kubeconfig, forwardport, skipPodSummary)
log.Println("Start collecting cluster data")

err := d.DumpCluster()
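With the flag plumbed through to dumper.New, a run along the lines of `pt-k8s-debug-collector --resource auto --skip-pod-summary` (a hypothetical invocation; the flags are as registered above) should produce a cluster-dump.tar.gz that contains no summary.txt files, which is exactly the zero count that TestPT_2453 below asserts for every supported resource type.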
@@ -365,6 +365,106 @@ func TestSSLResourceOption(t *testing.T) {
	}
}

/*
Tests for option --skip-pod-summary
*/
func TestPT_2453(t *testing.T) {
	testcmd := []string{"sh", "-c", "tar -tf cluster-dump.tar.gz --wildcards '*/summary.txt' 2>/dev/null | wc -l"}
	tests := []struct {
		name string
		resource string
		want string
		kubeconfig string
	}{
		{
			name: "none",
			resource: "none",
			want: "0",
			kubeconfig: "",
		},
		{
			name: "pxc",
			resource: "pxc",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PXC"),
		},
		{
			name: "ps",
			resource: "ps",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PS"),
		},
		{
			name: "psmdb",
			resource: "psmdb",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PSMDB"),
		},
		{
			name: "pg",
			resource: "pg",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PG"),
		},
		{
			name: "pgv2",
			resource: "pgv2",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PG2"),
		},
		{
			name: "auto pxc",
			resource: "auto",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PXC"),
		},
		{
			name: "auto ps",
			resource: "auto",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PS"),
		},
		{
			name: "auto psmdb",
			resource: "auto",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PSMDB"),
		},
		{
			name: "auto pg",
			resource: "auto",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PG"),
		},
		{
			name: "auto pgv2",
			resource: "auto",
			want: "0",
			kubeconfig: os.Getenv("KUBECONFIG_PG2"),
		},
	}

	for _, test := range tests {
		cmd := exec.Command("../../../bin/pt-k8s-debug-collector", "--kubeconfig", test.kubeconfig, "--forwardport", os.Getenv("FORWARDPORT"), "--resource", test.resource, "--skip-pod-summary")
		if err := cmd.Run(); err != nil {
			t.Errorf("error executing pt-k8s-debug-collector: %s\nCommand: %s", err.Error(), cmd.String())
		}
		defer func() {
			cmd = exec.Command("rm", "-f", "cluster-dump.tar.gz")
			if err := cmd.Run(); err != nil {
				t.Errorf("error cleaning up test data: %s", err.Error())
			}
		}()
		out, err := exec.Command(testcmd[0], testcmd[1:]...).Output()
		if err != nil {
			t.Errorf("test %s, error running command %s:\n%s\n\nCommand output:\n%s", test.name, testcmd, err.Error(), out)
		}
		if strings.TrimRight(bytes.NewBuffer(out).String(), "\n") != test.want {
			t.Errorf("test %s, output is not as expected\nOutput: %s\nWanted: %s", test.name, out, test.want)
		}
	}
}

/*
Option --version
*/
@@ -54,7 +54,7 @@ Options

``-o``, ``--order-by``
   Specifies the sorting order using fields:
   ``count``, ``ratio``, ``query-time``, ``docs-scanned``, ``docs-returned``.
   ``count``, ``ratio``, ``query-time``, ``docs-examined``, ``docs-returned``.

   Adding a hyphen (``-``) in front of a field denotes reverse order.
   For example: ``--order-by="count,-ratio"``.
@@ -94,13 +94,13 @@ Output Example
.. code-block:: none

   # Query 3:  0.06 QPS, ID 0b906bd86148def663d11b402f3e41fa
   # Ratio    1.00  (docs scanned/returned)
   # Ratio    1.00  (docs examined/returned)
   # Time range: 2017-02-03 16:01:37.484 -0300 ART to 2017-02-03 16:02:08.43 -0300 ART
   # Attribute            pct     total        min        max        avg        95%     stddev     median
   # ==================   ===  ========   ========   ========   ========   ========    =======   ========
   # Count (docs)                   100
   # Exec Time ms           2         3          0          1          0          0          0          0
   # Docs Scanned           5     7.50K      75.00      75.00      75.00      75.00       0.00      75.00
   # Docs Examined          5     7.50K      75.00      75.00      75.00      75.00       0.00      75.00
   # Docs Returned         92     7.50K      75.00      75.00      75.00      75.00       0.00      75.00
   # Bytes recv             1   106.12M      1.06M      1.06M      1.06M      1.06M       0.00      1.06M
   # String:
@@ -497,23 +497,23 @@ func sortQueries(queries []stats.QueryStats, orderby []string) []stats.QueryStat
	}

	//
	case "docs-scanned":
	case "docs-examined":
		f = func(c1, c2 *stats.QueryStats) bool {
			return c1.Scanned.Max < c2.Scanned.Max
			return c1.DocsExamined.Max < c2.DocsExamined.Max
		}
	case "-docs-scanned":
	case "-docs-examined":
		f = func(c1, c2 *stats.QueryStats) bool {
			return c1.Scanned.Max > c2.Scanned.Max
			return c1.DocsExamined.Max > c2.DocsExamined.Max
		}

	//
	case "docs-returned":
		f = func(c1, c2 *stats.QueryStats) bool {
			return c1.Returned.Max < c2.Scanned.Max
			return c1.Returned.Max < c2.Returned.Max
		}
	case "-docs-returned":
		f = func(c1, c2 *stats.QueryStats) bool {
			return c1.Returned.Max > c2.Scanned.Max
			return c1.Returned.Max > c2.Returned.Max
		}
	}
	// count,query-time,docs-examined, docs-returned. - in front of the field name denotes reverse order.")
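A hedged sketch of how one of these comparators plugs into sorting; the multi-key chaining of the real sorter is omitted, and sort.SliceStable is used so that earlier keys win ties:

// queries is a []stats.QueryStats already in scope.
less := func(c1, c2 *stats.QueryStats) bool {
	return c1.DocsExamined.Max < c2.DocsExamined.Max // "docs-examined"
}
sort.SliceStable(queries, func(i, j int) bool {
	return less(&queries[i], &queries[j])
})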
@@ -32,7 +32,8 @@ var logger = logrus.New()

func TestMain(m *testing.M) {
	logger.SetLevel(logrus.WarnLevel)
	os.Exit(m.Run())
	code := m.Run()
	os.Exit(code)
}

func TestConnection(t *testing.T) {
@@ -15,7 +15,7 @@ Usage

::

    pt-secure-data [<flags>] <command> [<args> ...]
    pt-secure-collect [<flags>] <command> [<args> ...]

By default, :program:`pt-secure-collect` will collect the output of:

@@ -7,6 +7,7 @@ BEGIN {
};

use strict;
use utf8;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;
@@ -339,6 +340,85 @@ SKIP: {
   );
};

# #############################################################################
# PT-2375: pt-table-sync must handle generated columns correctly
# #############################################################################
$row = {
   id  => 1,
   foo => 'foo',
   bar => 'bar',
};
$tbl_struct = {
   col_posn     => { id=>0, foo=>1, bar=>2 },
   is_generated => {foo=>1}
};
$ch = new ChangeHandler(
   Quoter     => $q,
   right_db   => 'test',    # dst
   right_tbl  => 'pt-2375',
   left_db    => 'test',    # src
   left_tbl   => 'pt-2375',
   actions    => [ sub { push @rows, @_ } ],
   replace    => 0,
   queue      => 0,
   tbl_struct => $tbl_struct,
);

@rows = ();
@dbhs = ();

is(
   $ch->make_INSERT($row, [qw(id foo bar)]),
   "INSERT INTO `test`.`pt-2375`(`id`, `bar`) VALUES ('1', 'bar')",
   'make_INSERT() omits generated columns'
);

is(
   $ch->make_REPLACE($row, [qw(id foo bar)]),
   "REPLACE INTO `test`.`pt-2375`(`id`, `bar`) VALUES ('1', 'bar')",
   'make_REPLACE() omits generated columns'
);

is(
   $ch->make_UPDATE($row, [qw(id foo)]),
   "UPDATE `test`.`pt-2375` SET `bar`='bar' WHERE `id`='1' AND `foo`='foo' LIMIT 1",
   'make_UPDATE() omits generated columns from SET phrase but includes in WHERE phrase'
);

is(
   $ch->make_DELETE($row, [qw(id foo bar)]),
   "DELETE FROM `test`.`pt-2375` WHERE `id`='1' AND `foo`='foo' AND `bar`='bar' LIMIT 1",
   'make_DELETE() includes generated columns in WHERE phrase'
);

SKIP: {
   skip 'Cannot connect to sandbox master', 3 unless $master_dbh;

   $master_dbh->do('DROP TABLE IF EXISTS test.`pt-2375`');
   $master_dbh->do('CREATE TABLE test.`pt-2375` (id INT, foo varchar(16) as ("foo"), bar char)');
   $master_dbh->do("INSERT INTO test.`pt-2375` (`id`, `bar`) VALUES (1,'a'),(2,'b')");

   $ch->fetch_back($master_dbh);

   is(
      $ch->make_INSERT($row, [qw(id foo)]),
      "INSERT INTO `test`.`pt-2375`(`id`, `bar`) VALUES ('1', 'a')",
      'make_INSERT() omits generated columns, with fetch-back'
   );

   is(
      $ch->make_REPLACE($row, [qw(id foo)]),
      "REPLACE INTO `test`.`pt-2375`(`id`, `bar`) VALUES ('1', 'a')",
      'make_REPLACE() omits generated columns, with fetch-back'
   );

   is(
      $ch->make_UPDATE($row, [qw(id foo)]),
      "UPDATE `test`.`pt-2375` SET `bar`='a' WHERE `id`='1' AND `foo`='foo' LIMIT 1",
      'make_UPDATE() omits generated columns from SET phrase, with fetch-back'
   );
};

# #############################################################################
# Issue 641: Make mk-table-sync use hex for binary/blob data
# #############################################################################
@@ -538,6 +618,52 @@ SKIP: {
   );
}

# #############################################################################
# PT-2377: pt-table-sync must handle utf8 in JSON columns correctly
# #############################################################################
SKIP: {
   skip 'Cannot connect to sandbox master', 1 unless $master_dbh;
   $master_dbh->do('DROP TABLE IF EXISTS `test`.`pt-2377`');
   $master_dbh->do('CREATE TABLE `test`.`pt-2377` (id INT, data JSON)');
   $master_dbh->do(q/INSERT INTO `test`.`pt-2377` VALUES (1, '{"name": "Müller"}')/);
   $master_dbh->do(q/INSERT INTO `test`.`pt-2377` VALUES (2, NULL)/);

   @rows = ();
   $tbl_struct = {
      cols     => [qw(id data)],
      col_posn => {id=>0, data=>1},
      type_for => {id=>'int', data=>'json'},
   };
   $ch = new ChangeHandler(
      Quoter     => $q,
      left_db    => 'test',
      left_tbl   => 'pt-2377',
      right_db   => 'test',
      right_tbl  => 'pt-2377',
      actions    => [ sub { push @rows, $_[0]; } ],
      replace    => 0,
      queue      => 0,
      tbl_struct => $tbl_struct,
   );
   $ch->fetch_back($master_dbh);

   $ch->change('UPDATE', {id=>1}, [qw(id)] );
   $ch->change('INSERT', {id=>1}, [qw(id)] );
   $ch->change('UPDATE', {id=>2}, [qw(id)] );
   $ch->change('INSERT', {id=>2}, [qw(id)] );

   is_deeply(
      \@rows,
      [
         q/UPDATE `test`.`pt-2377` SET `data`='{"name": "Müller"}' WHERE `id`='1' LIMIT 1/,
         q/INSERT INTO `test`.`pt-2377`(`id`, `data`) VALUES ('1', '{"name": "Müller"}')/,
         q/UPDATE `test`.`pt-2377` SET `data`=NULL WHERE `id`='2' LIMIT 1/,
         q/INSERT INTO `test`.`pt-2377`(`id`, `data`) VALUES ('2', NULL)/
      ],
      "UPDATE and INSERT quote data regardless of how it looks if tbl_struct->quote_val is true"
   );
}

# #############################################################################
# Done.
# #############################################################################
@@ -71,6 +71,8 @@ is( $q->quote_val('0x89504E470', is_char => 0), '0x89504E470', 'hex string, with
is( $q->quote_val('0x89504E470', is_char => 1), "'0x89504E470'", 'hex string, with is_char => 1');
is( $q->quote_val('0x89504I470'), "'0x89504I470'", 'looks like hex string');
is( $q->quote_val('eastside0x3'), "'eastside0x3'", 'looks like hex str (issue 1110');
is( $q->quote_val(969.1 / 360, is_float => 1), "2.6919444444444447", 'float has full precision');
is( $q->quote_val(0.1, is_float => 1), "0.1", 'full precision for float only used when required');

# Splitting DB and tbl apart
is_deeply(
@@ -123,11 +123,11 @@ $output = output(
);
$output = `cat archive.test.table_2`;
is($output, <<EOF
-1, 2, 3, 4
-2, "\\N", 3, 4
-3, 2, 3, "\\\t"
-4, 2, 3, "\\\n"
-5, 2, 3, "Zapp \\"Brannigan"
+1,2,3,4
+2,\\N,3,4
+3,2,3,"\\\t"
+4,2,3,"\\\n"
+5,2,3,"Zapp \\"Brannigan"
EOF
, '--output-format=csv');
`rm -f archive.test.table_2`;
75 t/pt-archiver/pt-2410.t Normal file
@@ -0,0 +1,75 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use charnames ':full';

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-archiver";

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('source');

if ( !$dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}

my $output;
my $exit_status;
my $cnf = "/tmp/12345/my.sandbox.cnf";
my $cmd = "$trunk/bin/pt-archiver";

$sb->wipe_clean($dbh);
$sb->create_dbs($dbh, ['test']);

$sb->load_file('source', 't/pt-archiver/samples/pt-2410.sql');

($output, $exit_status) = full_output(
   sub { pt_archiver::main(
      qw(--where 1=1 --output-format=csv),
      '--source', "L=1,D=pt_2410,t=test,F=$cnf",
      '--file', '/tmp/pt-2410.csv') },
);

is(
   $exit_status,
   0,
   'pt-archiver completed'
);

$output = `cat /tmp/pt-2410.csv`;
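# pt-archiver is expected to encode SQL NULL as a bare, unquoted \N field in
# CSV output; a quoted "\N" would round-trip as the literal two-character
# string instead of NULL, which the assertions below guard against.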
like(
   $output,
   qr/1,\\N,"testing..."/,
   'NULL values stored correctly'
) or diag($output);

$dbh->do("load data local infile '/tmp/pt-2410.csv' into table pt_2410.test COLUMNS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '\"'");

$output = `/tmp/12345/use pt_2410 -N -e 'SELECT * FROM test'`;

like(
   $output,
   qr/1 NULL testing.../,
   'NULL values loaded correctly'
) or diag($output);

# #############################################################################
# Done.
# #############################################################################
diag(`rm -f /tmp/pt-2410.csv`);
$sb->wipe_clean($dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;
exit;
10 t/pt-archiver/samples/pt-2410.sql Normal file
@@ -0,0 +1,10 @@
CREATE DATABASE pt_2410;
USE pt_2410;

CREATE TABLE test(
   id int not null primary key auto_increment,
   column1 int default null,
   column2 varchar(50) not null);

INSERT INTO test VALUES (null,null,'testing...');
INSERT INTO test VALUES (null,null,'testing...');
117 t/pt-config-diff/long_vars.t Normal file
@@ -0,0 +1,117 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-config-diff";

require VersionParser;
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('source');

my ($ver, $reset, $set_short, $set_long);

if ( !$dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}

if ( $sandbox_version ge '5.7' ) {
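   # $set_short is meant to keep session_track_system_variables within the
   # 1024-byte limit; $set_long pushes the value past 1024 bytes (see the
   # two assertions below).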
   $reset = q{SET GLOBAL session_track_system_variables = ''};
   $set_short =
      q{SET GLOBAL session_track_system_variables = '}.
      q{activate_all_roles_on_login,admin_address,}.
      q{admin_port,admin_ssl_ca,admin_ssl_capath,admin_ssl_cert,}.
      q{admin_ssl_cipher,admin_ssl_crl,admin_ssl_crlpath,admin_ssl_key,}.
      q{admin_tls_ciphersuites,admin_tls_version,authentication_policy,}.
      q{auto_generate_certs,auto_increment_increment,}.
      q{auto_increment_offset,autocommit,automatic_sp_privileges,}.
      q{back_log,basedir,big_tables,bind_address,binlog_cache_size,}.
      q{binlog_checksum,binlog_ddl_skip_rewrite,}.
      q{binlog_direct_non_transactional_updates,binlog_encryption,}.
      q{binlog_error_action,binlog_expire_logs_auto_purge,}.
      q{binlog_expire_logs_seconds,binlog_format,binlog_group_commit_sync_delay,}.
      q{binlog_group_commit_sync_no_delay_count,binlog_gtid_simple_recovery,}.
      q{binlog_max_flush_queue_time,binlog_order_commits,}.
      q{binlog_rotate_encryption_master_key_at_startup,binlog_row_event_max_size,}.
      q{binlog_row_image,binlog_row_metadata,binlog_row_value_options,}.
      q{binlog_rows_query_log_events,binlog_skip_flush_commands,}.
      q{binlog_space_limit,binlog_stmt_cache_size'};
   $set_long =
      q{SET GLOBAL session_track_system_variables = '}.
      q{activate_all_roles_on_login,admin_address,}.
      q{admin_port,admin_ssl_ca,admin_ssl_capath,admin_ssl_cert,}.
      q{admin_ssl_cipher,admin_ssl_crl,admin_ssl_crlpath,admin_ssl_key,}.
      q{admin_tls_ciphersuites,admin_tls_version,authentication_policy,}.
      q{auto_generate_certs,auto_increment_increment,}.
      q{auto_increment_offset,autocommit,automatic_sp_privileges,}.
      q{back_log,basedir,big_tables,bind_address,binlog_cache_size,}.
      q{binlog_checksum,binlog_ddl_skip_rewrite,}.
      q{binlog_direct_non_transactional_updates,binlog_encryption,}.
      q{binlog_error_action,binlog_expire_logs_auto_purge,}.
      q{binlog_expire_logs_seconds,binlog_format,binlog_group_commit_sync_delay,}.
      q{binlog_group_commit_sync_no_delay_count,binlog_gtid_simple_recovery,}.
      q{binlog_max_flush_queue_time,binlog_order_commits,}.
      q{binlog_rotate_encryption_master_key_at_startup,binlog_row_event_max_size,}.
      q{binlog_row_image,binlog_row_metadata,binlog_row_value_options,}.
      q{binlog_rows_query_log_events,binlog_skip_flush_commands,}.
      q{binlog_space_limit,binlog_stmt_cache_size,}.
      q{binlog_transaction_compression,binlog_transaction_compression_level_zstd'};
}
else {
   plan skip_all => "Requires MySQL 5.7 or newer";
}

my $output;
my $retval;

$sb->do_as_root('source', $set_short);

$output = output(
   sub { $retval = pt_config_diff::main(
      "${trunk}/t/pt-config-diff/samples/long_vars_1.cnf",
      'h=127.1,P=12345,u=msandbox,p=msandbox')
   },
   stderr => 1,
);

is(
   $retval,
   0,
   "No diff on variable value up to 1024 bytes long"
);

$sb->do_as_root('source', $set_long);

$output = output(
   sub { $retval = pt_config_diff::main(
      "${trunk}/t/pt-config-diff/samples/long_vars_2.cnf",
      'h=127.1,P=12345,u=msandbox,p=msandbox')
   },
   stderr => 1,
);

is(
   $retval,
   0,
   "No diff on variable value longer than 1024 bytes"
);

# #############################################################################
# Done.
# #############################################################################
$sb->do_as_root('source', $reset);

$sb->wipe_clean($dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;
2 t/pt-config-diff/samples/long_vars_1.cnf Normal file
@@ -0,0 +1,2 @@
[mysqld]
session_track_system_variables = 'activate_all_roles_on_login,admin_address,admin_port,admin_ssl_ca,admin_ssl_capath,admin_ssl_cert,admin_ssl_cipher,admin_ssl_crl,admin_ssl_crlpath,admin_ssl_key,admin_tls_ciphersuites,admin_tls_version,authentication_policy,auto_generate_certs,auto_increment_increment,auto_increment_offset,autocommit,automatic_sp_privileges,back_log,basedir,big_tables,bind_address,binlog_cache_size,binlog_checksum,binlog_ddl_skip_rewrite,binlog_direct_non_transactional_updates,binlog_encryption,binlog_error_action,binlog_expire_logs_auto_purge,binlog_expire_logs_seconds,binlog_format,binlog_group_commit_sync_delay,binlog_group_commit_sync_no_delay_count,binlog_gtid_simple_recovery,binlog_max_flush_queue_time,binlog_order_commits,binlog_rotate_encryption_master_key_at_startup,binlog_row_event_max_size,binlog_row_image,binlog_row_metadata,binlog_row_value_options,binlog_rows_query_log_events,binlog_skip_flush_commands,binlog_space_limit,binlog_stmt_cache_size'
2 t/pt-config-diff/samples/long_vars_2.cnf Normal file
@@ -0,0 +1,2 @@
[mysqld]
session_track_system_variables = 'activate_all_roles_on_login,admin_address,admin_port,admin_ssl_ca,admin_ssl_capath,admin_ssl_cert,admin_ssl_cipher,admin_ssl_crl,admin_ssl_crlpath,admin_ssl_key,admin_tls_ciphersuites,admin_tls_version,authentication_policy,auto_generate_certs,auto_increment_increment,auto_increment_offset,autocommit,automatic_sp_privileges,back_log,basedir,big_tables,bind_address,binlog_cache_size,binlog_checksum,binlog_ddl_skip_rewrite,binlog_direct_non_transactional_updates,binlog_encryption,binlog_error_action,binlog_expire_logs_auto_purge,binlog_expire_logs_seconds,binlog_format,binlog_group_commit_sync_delay,binlog_group_commit_sync_no_delay_count,binlog_gtid_simple_recovery,binlog_max_flush_queue_time,binlog_order_commits,binlog_rotate_encryption_master_key_at_startup,binlog_row_event_max_size,binlog_row_image,binlog_row_metadata,binlog_row_value_options,binlog_rows_query_log_events,binlog_skip_flush_commands,binlog_space_limit,binlog_stmt_cache_size,binlog_transaction_compression,binlog_transaction_compression_level_zstd'
@@ -137,7 +137,7 @@ set_delay();
# We need to sleep, otherwise pt-osc can finish before replica is delayed
sleep($max_lag);

-my $args = "$source_dsn,D=test,t=pt1717 --execute --chunk-size ${chunk_size} --max-lag $max_lag --alter 'engine=INNODB' --pid $tmp_file_name --progress time,5 --no-drop-new-table --no-drop-triggers --history";
+my $args = "$source_dsn,D=test,t=pt1717 --execute --chunk-size ${chunk_size} --max-lag $max_lag --alter 'ADD COLUMN foo varchar(32)' --pid $tmp_file_name --progress time,5 --no-drop-new-table --no-drop-triggers --history";

$output = run_broken_job($args);

@@ -165,7 +165,7 @@ my @args = (qw(--execute --chunk-size=10 --history));

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=INNODB', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
      '--chunk-index=f2'
   ) }
);
@@ -186,7 +186,7 @@ like(
   sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
      '--max-lag', $max_lag,
      '--resume', $job_id,
-      '--alter', 'engine=INNODB',
+      '--alter', 'ADD COLUMN foo varchar(32)',
      '--plugin', "$plugin/pt-1717.pm",
   ),
},
@@ -208,8 +208,10 @@ ok(
   'All rows copied correctly'
) or diag("New table checksum: '${new_table_checksum}', original content checksum: '${old_table_checksum}'");

+diag(`/tmp/12345/use test -N -e "ALTER TABLE pt1717 DROP COLUMN foo"`);
+
# Tests for chunk-index and chunk-index-columns options
-$args = "$source_dsn,D=test,t=pt1717 --alter engine=innodb --execute --history --chunk-size=10 --no-drop-new-table --no-drop-triggers --reverse-triggers --chunk-index=f2";
+$args = "$source_dsn,D=test,t=pt1717 --alter 'ADD COLUMN foo varchar(32)' --execute --history --chunk-size=10 --no-drop-new-table --no-drop-triggers --reverse-triggers --chunk-index=f2";

set_delay();
$output = run_broken_job($args);
@@ -220,7 +222,7 @@ $job_id = $1;

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
   ) }
);

@@ -238,7 +240,7 @@ like(

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
      '--chunk-index=f1'
   ) }
);
@@ -257,7 +259,7 @@ like(

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
      '--chunk-index=f2', '--chunk-index-columns=1'
   ) }
);
@@ -288,7 +290,7 @@ is(
   $output + 0,
   3,
   'Triggers were not dropped'
-);
+) or diag($output);

$output = `/tmp/12345/use -N -e "select count(*) from information_schema.triggers where TRIGGER_SCHEMA='test' AND EVENT_OBJECT_TABLE like '%pt1717%_new' AND trigger_name LIKE 'rt_%'"`;

@@ -300,7 +302,7 @@ is(

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
      '--chunk-size=4',
      '--chunk-index=f2'
   ) }
@@ -348,7 +350,7 @@ ok(

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
      '--chunk-size=4',
      '--chunk-index=f2'
   ) }
@@ -372,7 +374,7 @@ $output =~ /New table `test`.`([_]+pt1717_new)` not found, restart operation fro

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
      '--chunk-size=4',
      '--chunk-index=f2'
   ) }
239 t/pt-online-schema-change/pt-2355.t Normal file
@@ -0,0 +1,239 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use threads;

use English qw(-no_match_vars);
use Test::More;

use Data::Dumper;
use PerconaTest;
use Sandbox;
use SqlModes;
use File::Temp qw/ tempdir tempfile /;

our $delay = 10;
my $max_lag = $delay / 2;
my $output;
my $exit;

my $tmp_file = File::Temp->new();
my $tmp_file_name = $tmp_file->filename;
unlink $tmp_file_name;

require "$trunk/bin/pt-online-schema-change";

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
if ($sb->is_cluster_mode) {
   plan skip_all => 'Not for PXC';
}

my $source_dbh = $sb->get_dbh_for('source');
my $replica_dbh1 = $sb->get_dbh_for('replica1');
my $replica_dbh2 = $sb->get_dbh_for('replica2');
my $source_dsn = 'h=127.0.0.1,P=12345,u=msandbox,p=msandbox';
my $replica_dsn1 = 'h=127.0.0.1,P=12346,u=msandbox,p=msandbox';
my $replica_dsn2 = 'h=127.0.0.1,P=12347,u=msandbox,p=msandbox';
my $sample = "t/pt-online-schema-change/samples";
my $plugin = "$trunk/$sample/plugins";

# We need sync_relay_log=1 to keep changes after replica restart
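# (sync_relay_log_info=1 and relay_log_recovery=1 are likewise intended to
# keep the replica's relay-log position durable across the hard stop/start
# that run_broken_job() performs below.)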
my $cnf = '/tmp/12347/my.sandbox.cnf';
diag(`cp $cnf $cnf.bak`);
diag(`echo "[mysqld]" > /tmp/12347/my.sandbox.2.cnf`);
diag(`echo "sync_relay_log=1" >> /tmp/12347/my.sandbox.2.cnf`);
diag(`echo "sync_relay_log_info=1" >> /tmp/12347/my.sandbox.2.cnf`);
diag(`echo "relay_log_recovery=1" >> /tmp/12347/my.sandbox.2.cnf`);
diag(`echo "!include /tmp/12347/my.sandbox.2.cnf" >> $cnf`);
diag(`/tmp/12347/stop >/dev/null`);
sleep 1;
diag(`/tmp/12347/start >/dev/null`);

sub reset_query_cache {
   my @dbhs = @_;
   return if ($sandbox_version ge '8.0');
   foreach my $dbh (@dbhs) {
      $dbh->do('RESET QUERY CACHE');
   }
}
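
# Fork pt-online-schema-change in a child process, then stop replica 12347
# while the copy is still running so the run fails and can be resumed later;
# returns the tool's combined output.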
sub run_broken_job {
   my ($args) = @_;
   my ($fh, $filename) = tempfile();
   my $pid = fork();

   if (!$pid) {
      open(STDERR, '>', $filename);
      open(STDOUT, '>', $filename);
      exec("$trunk/bin/pt-online-schema-change $args");
   }

   sleep($max_lag + $max_lag/2);
   # stop replica 12347
   diag(`/tmp/12347/stop >/dev/null`);
   sleep 1;

   waitpid($pid, 0);
   my $output = do {
      local $/ = undef;
      <$fh>;
   };

   return $output;
}

sub set_delay {
   $sb->wait_for_replicas();

   diag("Setting replica delay to $delay seconds");
   diag(`/tmp/12345/use -N test -e "DROP TABLE IF EXISTS pt1717_back"`);

   $replica_dbh1->do("STOP ${replica_name}");
   $replica_dbh1->do("CHANGE ${source_change} TO ${source_name}_DELAY=$delay");
   $replica_dbh1->do("START ${replica_name}");

   # Run a full table scan query to ensure the replica is behind the source
   # There is no query cache in MySQL 8.0+
   reset_query_cache($source_dbh, $source_dbh);
   # Update one row so replica is delayed
   $source_dbh->do('UPDATE `test`.`pt1717` SET f2 = f2 + 1 LIMIT 1');
   $source_dbh->do('UPDATE `test`.`pt1717` SET f2 = f2 + 1 WHERE f1 = ""');

   # Create a copy of table pt1717, so we can compare data later
   diag(`/tmp/12345/use -N test -e "CREATE TABLE pt1717_back like pt1717"`);
   diag(`/tmp/12345/use -N test -e "INSERT INTO pt1717_back SELECT * FROM pt1717"`);
}

# 1) Set the replica delay to 0 just in case we are re-running the tests without restarting the sandbox.
# 2) Load sample data.
# 3) Set the replica delay to $delay seconds to be able to see the 'waiting' message.
diag("Setting replica delay to 0 seconds");
$replica_dbh1->do("STOP ${replica_name}");
$source_dbh->do("RESET ${source_reset}");
$replica_dbh1->do("RESET ${replica_name}");
$replica_dbh1->do("START ${replica_name}");

diag('Loading test data');
$sb->load_file('source', "t/pt-online-schema-change/samples/pt-1717.sql");

# Should be greater than chunk-size and big enough, so pt-osc will wait for delay
my $num_rows = 5000;
my $chunk_size = 10;
diag("Loading $num_rows into the table. This might take some time.");
diag(`util/mysql_random_data_load --host=127.0.0.1 --port=12345 --user=msandbox --password=msandbox test pt1717 $num_rows`);

diag("Starting tests...");

set_delay();

# We need to sleep, otherwise pt-osc can finish before replica is delayed
sleep($max_lag);

my $args = "$source_dsn,D=test,t=pt1717 --execute --chunk-size ${chunk_size} --max-lag $max_lag --alter 'ADD INDEX idx1(f1)' --pid $tmp_file_name --progress time,5 --no-drop-new-table --no-drop-triggers --history";

$output = run_broken_job($args);

like(
   $output,
   qr/`test`.`pt1717` was not altered/s,
   "pt-osc stopped with error as expected",
) or diag($output);

diag(`/tmp/12347/start >/dev/null`);
$sb->wait_for_replicas();

$output = `/tmp/12345/use -N -e "select job_id, upper_boundary from percona.pt_osc_history"`;
my ($job_id, $upper_boundary) = split(/\s+/, $output);

my $copied_rows = `/tmp/12345/use -N -e "select count(*) from test._pt1717_new"`;
chomp($copied_rows);

ok(
   $copied_rows eq $upper_boundary,
   'Upper chunk boundary stored correctly'
) or diag("Copied_rows: ${copied_rows}, upper boundary: ${upper_boundary}");

($output, $exit) = full_output(
   sub { pt_online_schema_change::main("$source_dsn,D=test,t=pt1717",
      "--execute", "--chunk-size=${chunk_size}", "--max-lag=${max_lag}",
      "--alter=ADD INDEX idx1(f1)",
      "--resume=${job_id}",
   ) }
);

is(
   $exit,
   0,
   'pt-osc works correctly with --resume'
) or diag($exit);

like(
   $output,
   qr/Successfully altered/,
   'Success message printed'
) or diag($output);

# Corrupting job record, so we can test error message
diag(`/tmp/12345/use -N -e "update percona.pt_osc_history set new_table_name=NULL where job_id=${job_id}"`);

($output, $exit) = full_output(
   sub { pt_online_schema_change::main("$source_dsn,D=test,t=pt1717",
      "--execute", "--chunk-size=${chunk_size}", "--max-lag=${max_lag}",
      "--alter=ADD INDEX idx1(f1)",
      "--resume=${job_id}",
   ) }
);

is(
   $exit,
   17,
   'pt-osc correctly fails with empty boundaries'
) or diag($exit);

like(
   $output,
   qr/Option --resume refers job \d+ with empty boundaries. Exiting./,
   'Correct error message printed'
) or diag($output);

unlike(
   $output,
   qr/Option --resume refers non-existing job ID: \d+. Exiting./,
   'Misleading error message not printed'
) or diag($output);

# #############################################################################
# Done.
# #############################################################################
diag("Cleaning");
$replica_dbh2 = $sb->get_dbh_for('replica2');
diag("Setting replica delay to 0 seconds");
$replica_dbh1->do("STOP ${replica_name}");
$replica_dbh2->do("STOP ${replica_name}");
$source_dbh->do("RESET ${source_reset}");
$replica_dbh1->do("RESET ${source_reset}");
$replica_dbh1->do("RESET ${replica_name}");
$replica_dbh2->do("RESET ${replica_name}");
$replica_dbh1->do("START ${replica_name}");
$replica_dbh2->do("START ${replica_name}");

diag(`mv $cnf.bak $cnf`);

diag(`/tmp/12347/stop >/dev/null`);
diag(`/tmp/12347/start >/dev/null`);

diag("Dropping test database");
$source_dbh->do("DROP DATABASE IF EXISTS test");
$sb->wait_for_replicas();

$sb->wipe_clean($source_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;
71 t/pt-online-schema-change/pt-2407.t Normal file
@@ -0,0 +1,71 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-online-schema-change";
require VersionParser;

use Data::Dumper;

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $source_dbh = $sb->get_dbh_for('source');
my $replica_dbh = $sb->get_dbh_for('replica1');

if ( !$source_dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}
elsif ( !$replica_dbh ) {
   plan skip_all => 'Cannot connect to sandbox replica';
}

my @args = qw(--set-vars innodb_lock_wait_timeout=3);
my $output = "";
my $dsn = "h=127.1,P=12345,u=msandbox,p=msandbox";
my $exit = 0;
my $sample = "t/pt-online-schema-change/samples";

$sb->load_file('source', "$sample/pt-2407.sql");

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$dsn,D=pt_2407,t=t1",
      '--alter', 'alter table t1 ADD COLUMN payout_group_id VARCHAR(255) DEFAULT NULL, ALGORITHM=INSTANT;', '--execute') }
);

is(
   $exit,
   11,
   'Return code non-zero for failed operation'
) or diag($exit);

like(
   $output,
   qr/You have an error in your SQL syntax/,
   'Job failed due to SQL syntax error'
) or diag($output);

like(
   $output,
   qr/Error altering new table/,
   'Error altering new table message printed'
) or diag($output);

# #############################################################################
# Done.
# #############################################################################

$sb->wipe_clean($source_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;
120 t/pt-online-schema-change/pt-2422.t Normal file
@@ -0,0 +1,120 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-online-schema-change";
require VersionParser;

use Data::Dumper;

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $source_dbh = $sb->get_dbh_for('source');
my $replica_dbh = $sb->get_dbh_for('replica1');

if ( !$source_dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}
elsif ( !$replica_dbh ) {
   plan skip_all => 'Cannot connect to sandbox replica';
}

my @args = qw(--set-vars innodb_lock_wait_timeout=3);
my $output = "";
my $dsn = "h=127.1,P=12345,u=msandbox,p=msandbox";
my $exit = 0;
my $sample = "t/pt-online-schema-change/samples";

$sb->load_file('source', "$sample/basic_no_fks_innodb.sql");
$source_dbh->do('CREATE TABLE pt_osc.pt_2422 LIKE pt_osc.t');
$source_dbh->do('INSERT INTO pt_osc.pt_2422 SELECT * FROM pt_osc.t');

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$dsn,D=pt_osc,t=t",
      '--alter', 'engine=innodb', '--execute', '--history') }
);

is(
   $exit,
   0,
   'basic test with option --history finished OK'
) or diag($output);

like(
   $output,
   qr/Job \d started/,
   'Job id printed in the beginning of the tool output'
);

like(
   $output,
   qr/Job \d finished successfully/,
   'Job id printed for successful copy'
);

$output = `/tmp/12345/use -N -e "SELECT new_table_name FROM percona.pt_osc_history WHERE job_id=1"`;

like(
   $output,
   qr/_t_new/,
   'Correct new table name inserted'
) or diag($output);

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$dsn,D=pt_osc,t=pt_2422",
      '--alter', 'engine=innodb', '--execute', '--history') }
);

is(
   $exit,
   0,
   'basic test with second table and option --history finished OK'
) or diag($output);

like(
   $output,
   qr/Job \d started/,
   'Job id printed in the beginning of the tool output for the second table'
);

like(
   $output,
   qr/Job \d finished successfully/,
   'Job id printed for successful copy of the second table'
);

$output = `/tmp/12345/use -N -e "SELECT new_table_name FROM percona.pt_osc_history WHERE job_id=1"`;

like(
   $output,
   qr/_t_new/,
   'New table name for previous job was not updated'
) or diag($output);

$output = `/tmp/12345/use -N -e "SELECT new_table_name FROM percona.pt_osc_history WHERE job_id=2"`;

like(
   $output,
   qr/_pt_2422_new/,
   'Correct new table name inserted for the second table'
) or diag($output);

# #############################################################################
# Done.
# #############################################################################

$sb->wipe_clean($source_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;
12 t/pt-online-schema-change/samples/pt-2407.sql Normal file
@@ -0,0 +1,12 @@
CREATE DATABASE pt_2407;

USE pt_2407;

CREATE TABLE t1 (
   c1 int NOT NULL,
   c2 varchar(100) NOT NULL,
   PRIMARY KEY (c1),
   KEY idx (c2)
) ENGINE=InnoDB;

INSERT INTO t1 VALUES(1,1),(2,2),(3,3),(4,4),(5,5);
@@ -17,12 +17,6 @@ use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-query-digest";

-ok (1,
-   "version checking site offline for now"
-);
-done_testing;
-exit 0;
-
my $output;
my $cmd = "$trunk/bin/pt-query-digest --limit 1 $trunk/t/lib/samples/slowlogs/slow001.txt";

@@ -49,7 +43,7 @@ ok(

like(
   $output,
-   qr/# Query 1: 0 QPS, 0x concurrency, ID 0x7F7D57ACDD8A346E at byte 0/,
+   qr/# Query 1: 0 QPS, 0x concurrency, ID 0xA853B50CDEB4866B3A99CC42AEDCCFCD at byte 359/,
   "Tool ran after version-check"
) or diag(Dumper($output));

@@ -172,6 +166,23 @@ ok(

unlink "/tmp/pt-query-digest.$PID" if "/tmp/pt-query-digest.$PID";

# #############################################################################
# PT-2129 - tools fail on non-readable version check file
# #############################################################################

system("touch $vc_file");
chmod 0000, $vc_file;

$output = `$cmd --version-check 2>&1`;

unlike(
   $output,
   qr/Can't use an undefined value as an ARRAY reference/,
   'No undefined value error'
) or diag($output);

chmod 0664, $vc_file;

# #############################################################################
# Done.
# #############################################################################
@@ -378,6 +378,29 @@ is(
diag(`/tmp/12346/stop >/dev/null`);
diag(`/tmp/12346/start >/dev/null`);

# #############################################################################
# typo in pt-table-checksum error message
# https://perconadev.atlassian.net/browse/PT-2424
# #############################################################################

$output = output(sub {
      pt_table_checksum::main($source_dsn,
         qw(--no-empty-replicate-table --truncate-replicate-table)
   )},
   stderr => 1,
);

unlike(
   $output,
   qr/--resume and --no-empty-replicate-table are mutually exclusive/,
   "PT-2424: no typo in the error message"
);

like(
   $output,
   qr/--truncate-replicate-table and --no-empty-replicate-table are mutually exclusive/,
   "PT-2424: correct error message"
);

#
# #############################################################################
70 t/pt-table-checksum/pt-2400.t Normal file
@@ -0,0 +1,70 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;
use Data::Dumper;

# Hostnames make testing less accurate. Tests need to see
# that such-and-such happened on specific replica hosts, but
# the sandbox servers are all on one host so all replicas have
# the same hostname.
$ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} = 1;

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-table-checksum";

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $node1 = $sb->get_dbh_for('node1');
my $sb_version = VersionParser->new($node1);
my $node2 = $sb->get_dbh_for('node2');
my $node3 = $sb->get_dbh_for('node3');

my %checks = (
   'Cannot connect to cluster node1' => !$node1,
   'Cannot connect to cluster node2' => !$node2,
   'Cannot connect to cluster node3' => !$node3,
   'PXC tests' => !$sb->is_cluster_mode,
);

for my $message (keys %checks) {
   if ( $checks{$message} ) {
      plan skip_all => $message;
   }
}

my $node1_dsn = $sb->dsn_for('node1');
my @args = ($node1_dsn, qw(--databases pt-2400 --tables apple),
   qw(--recursion-method none),
   qw(--replicate percona.checksums --create-replicate-table --empty-replicate-table )
);
my $sample = "t/pt-table-checksum/samples/";

$sb->load_file('node1', "$sample/pt-2400.sql");

my ($output, $error) = full_output(
   sub { pt_table_checksum::main(@args) },
   stderr => 1,
);

unlike(
   $output,
   qr/Immediately starting the version comment after the version number is deprecated and may change behavior in a future release. Please insert a white-space character after the version number./,
   "No typo in version comment"
);

# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($node1);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;
13 t/pt-table-checksum/samples/pt-2400.sql Normal file
@@ -0,0 +1,13 @@
DROP DATABASE IF EXISTS `pt-2400`;
CREATE DATABASE `pt-2400`;
USE `pt-2400`;
CREATE TABLE `apple` (
   `id` int NOT NULL,
   `name` varchar(255) NOT NULL,
   PRIMARY KEY (`id`,`name`)
) ENGINE=InnoDB;

INSERT INTO `apple` VALUES
(1, 'Granny Smith'),
(2, 'Red Delicious'),
(3, 'Golden Apple');
@@ -63,6 +63,23 @@ is(
   '--float-precision so no more diff (issue 410)'
);

# Although the SQL statement serializes values with more decimal digits than
# necessary, executing it produces the expected value
$output = `$trunk/bin/pt-table-sync --sync-to-source h=127.1,P=12346,u=msandbox,p=msandbox,D=test,t=fl --execute 2>&1`;
is(
   $output,
   '',
   'REPLACE statement can be successfully applied'
);

$sb->wait_for_replicas();
my @rows = $replica_dbh->selectrow_array('SELECT `d` FROM `test`.`fl` WHERE `d` = 2.0000012');
is_deeply(
   \@rows,
   [2.0000012],
   'Floating point values are set correctly in round trip'
);

# #############################################################################
# pt-table-sync quotes floats, prevents syncing
# https://bugs.launchpad.net/percona-toolkit/+bug/1229861
@@ -86,7 +103,6 @@ is_deeply(
   [],
   "Sync rows with float values (bug 1229861)"
) or diag(Dumper($rows), $output);

# #############################################################################
# Done.
# #############################################################################
80 t/pt-table-sync/pt-2375.t Normal file
@@ -0,0 +1,80 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-table-sync";

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $source_dbh = $sb->get_dbh_for('source');
my $replica1_dbh = $sb->get_dbh_for('replica1');

if ( !$source_dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}
elsif ( !$replica1_dbh ) {
   plan skip_all => 'Cannot connect to sandbox replica1';
}
else {
   plan tests => 3;
}

my $output;

# #############################################################################
# Test generated REPLACE statements.
# #############################################################################
$sb->load_file('source', "t/pt-table-sync/samples/pt-2375.sql");
$sb->wait_for_replicas();
$replica1_dbh->do("delete from `test`.`test_table` where `id`=1");

$output = remove_traces(output(
   sub { pt_table_sync::main('--sync-to-source',
      'h=127.0.0.1,P=12346,u=msandbox,p=msandbox',
      qw(-t test.test_table --print --execute))
   },
));
chomp($output);
is(
   $output,
   "REPLACE INTO `test`.`test_table`(`id`, `value`) VALUES ('1', '24');",
   "Generated columns are not used in REPLACE statements"
);

# #############################################################################
# Test generated UPDATE statements.
# #############################################################################
$sb->load_file('source', "t/pt-table-sync/samples/pt-2375.sql");
$sb->wait_for_replicas();
$replica1_dbh->do("update `test`.`test_table` set `value`=55 where `id`=2");

$output = remove_traces(output(
   sub { pt_table_sync::main(qw(--print --execute),
      "h=127.0.0.1,P=12346,u=msandbox,p=msandbox,D=test,t=test_table",
      "h=127.0.0.1,P=12345,u=msandbox,p=msandbox,D=test,t=test_table");
   }
));
chomp($output);
is(
   $output,
   "UPDATE `test`.`test_table` SET `value`='55' WHERE `id`='2' LIMIT 1;",
   "Generated columns are not used in UPDATE statements"
);

# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($source_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
exit;
83 t/pt-table-sync/pt-2377.t Normal file
@@ -0,0 +1,83 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-table-sync";

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $source_dbh = $sb->get_dbh_for('source');
my $replica1_dbh = $sb->get_dbh_for('replica1');

if ( !$source_dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}
elsif ( !$replica1_dbh ) {
   plan skip_all => 'Cannot connect to sandbox replica1';
}
elsif ( $sandbox_version lt '8.0') {
   plan skip_all => 'Requires MySQL >= 8.0';
}
else {
   plan tests => 3;
}

my $output;

# #############################################################################
# Test generated REPLACE statements.
# #############################################################################
$sb->load_file('source', "t/pt-table-sync/samples/pt-2377.sql");
$sb->wait_for_replicas();
$replica1_dbh->do("delete from `test`.`test_table` where `id`=1");
$output = remove_traces(output(
   sub { pt_table_sync::main('--sync-to-source',
      'h=127.0.0.1,P=12346,u=msandbox,p=msandbox',
      qw(-t test.test_table --print --execute))
   },
));
chomp($output);
is(
   $output,
   q/REPLACE INTO `test`.`test_table`(`id`, `data`) VALUES ('1', '{"name": "Müller"}');/,
   "UTF8 characters of JSON values are printed correctly in REPLACE statements"
);

# #############################################################################
# Test generated UPDATE statements.
# #############################################################################
$sb->load_file('source', "t/pt-table-sync/samples/pt-2377.sql");
$sb->wait_for_replicas();
$replica1_dbh->do(q/update `test`.`test_table` set `data`='{"reaction": "哈哈哈"}' where `id`=2/);

$output = remove_traces(output(
   sub { pt_table_sync::main(qw(--print --execute),
      "h=127.0.0.1,P=12346,u=msandbox,p=msandbox,D=test,t=test_table",
      "h=127.0.0.1,P=12345,u=msandbox,p=msandbox,D=test,t=test_table");
   }
));
chomp($output);
is(
   $output,
   q/UPDATE `test`.`test_table` SET `data`='{"reaction": "哈哈哈"}' WHERE `id`='2' LIMIT 1;/,
   "UTF8 characters of JSON values are printed correctly in UPDATE statements"
);

# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($source_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
exit;
96 t/pt-table-sync/pt-2378.t Normal file
@@ -0,0 +1,96 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-table-sync";

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $source_dbh = $sb->get_dbh_for('source');
my $replica1_dbh = $sb->get_dbh_for('replica1');

if ( !$source_dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}
elsif ( !$replica1_dbh ) {
   plan skip_all => 'Cannot connect to sandbox replica1';
}
else {
   plan tests => 5;
}

my ($output, @rows);

# #############################################################################
# Test generated REPLACE statements.
# #############################################################################
$sb->load_file('source', "t/pt-table-sync/samples/pt-2378.sql");
$sb->wait_for_replicas();
$replica1_dbh->do("update `test`.`test_table` set `some_string` = 'c' where `id` = 1");
$output = remove_traces(output(
   sub { pt_table_sync::main('--sync-to-source',
      'h=127.0.0.1,P=12346,u=msandbox,p=msandbox',
      qw(-t test.test_table --print --execute))
   },
));
chomp($output);
is(
   $output,
   "REPLACE INTO `test`.`test_table`(`id`, `value1`, `value2`, `some_string`) VALUES ('1', 315.25999999999942, 2.6919444444444447, 'a');",
   "Floating point numbers are generated with sufficient precision in REPLACE statements"
);

$sb->wait_for_replicas();
my $query = 'SELECT * FROM `test`.`test_table` WHERE `value1` = 315.2599999999994 AND `value2` = 2.6919444444444447';
@rows = $replica1_dbh->selectrow_array($query);
is_deeply(
   \@rows,
   [1, 315.2599999999994, 2.6919444444444447, 'a'],
   'Floating point values are set correctly in round trip'
);

# #############################################################################
# Test generated UPDATE statements.
# #############################################################################
$sb->load_file('source', "t/pt-table-sync/samples/pt-2378.sql");
$sb->wait_for_replicas();
$replica1_dbh->do("update `test`.`test_table` set `some_string` = 'c' where `id` = 1");

$output = remove_traces(output(
   sub { pt_table_sync::main(qw(--print --execute),
      "h=127.0.0.1,P=12346,u=msandbox,p=msandbox,D=test,t=test_table",
      "h=127.0.0.1,P=12345,u=msandbox,p=msandbox,D=test,t=test_table");
   }
));
chomp($output);
is(
   $output,
   "UPDATE `test`.`test_table` SET `value1`=315.25999999999942, `value2`=2.6919444444444447, `some_string`='c' WHERE `id`='1' LIMIT 1;",
   "Floating point numbers are generated with sufficient precision in UPDATE statements"
);

@rows = $source_dbh->selectrow_array($query);
is_deeply(
   \@rows,
   [1, 315.2599999999994, 2.6919444444444447, 'c'],
   'Floating point values are set correctly in round trip'
);

# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($source_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
exit;
12 t/pt-table-sync/samples/pt-2375.sql Normal file
@@ -0,0 +1,12 @@
DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
USE test;

CREATE TABLE `test_table` (
   `id` INT AUTO_INCREMENT PRIMARY KEY,
   `value` INT NOT NULL,
   `derived_value` INT AS (2*`value`)
) ENGINE=InnoDB;

INSERT INTO `test_table` (`value`) VALUES (24);
INSERT INTO `test_table` (`value`) VALUES (42);
14 t/pt-table-sync/samples/pt-2377.sql Normal file
@@ -0,0 +1,14 @@
DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
USE test;

CREATE TABLE `test_table` (
   `id` INT AUTO_INCREMENT PRIMARY KEY,
   `data` JSON NOT NULL
) ENGINE=InnoDB;

INSERT INTO
   `test_table` (`data`)
VALUES
   ('{"name": "Müller"}'),
   ('{"reaction": "哈哈"}');
15 t/pt-table-sync/samples/pt-2378.sql Normal file
@@ -0,0 +1,15 @@
DROP DATABASE IF EXISTS test;
CREATE DATABASE test;
USE test;

CREATE TABLE `test_table` (
   `id` BIGINT AUTO_INCREMENT PRIMARY KEY,
   `value1` DOUBLE NOT NULL,
   `value2` DOUBLE NOT NULL,
   `some_string` VARCHAR(32) NOT NULL
) ENGINE=InnoDB;

INSERT INTO `test_table`
   (`value1`, `value2`, `some_string`)
VALUES
   (315.2599999999994, 2.6919444444444447, 'a');
@@ -137,6 +137,10 @@ if [ ! -f $tool_file ]; then
   die "$tool_file does not exist"
fi

+if [ -h $tool_file ]; then
+   die "$tool_file is a symbolic link"
+fi
+
if [ -n "$(head -n 1 $tool_file | grep perl)" ]; then
   tool_lang="perl"
else