Mirror of https://github.com/percona/percona-toolkit.git (synced 2025-09-04 03:26:19 +00:00)

Compare commits: PT-2422-pt ... release-3. (80 commits)

The 80 commits in this range (SHA1s; the author and date columns of the compare table were empty in the capture):

84095fd7d7, 47b8c5b067, 7717cfe4f1, f9d9a993e9, 7f322f7cbd, 3ff98c20bc, f816053065,
34a14ec77e, ac53883f29, 808c590e7a, c09b622c3e, 2df1bd8950, ebacadf098, c49c58db2b,
9711db87a7, e964e17f21, c3a201d5f8, 2474b1f45b, 88367c1dea, 840ba6926b, 25f4ee6d80,
c83d2f547d, e4cecc3e69, f9ea94f195, c92d95bc38, 6b449ec081, af7bd8abd6, 6fad1f0ff0,
f4a324581a, 3cb46e61f7, 2198763042, 4bf48d864f, 16f5aac023, 69cbfca27f, 14623c5dce,
71ffb19e9e, 5c16d37020, f70f8084dd, 5474f5d5ff, 61915d615c, 201a0d9b18, d0f8fb231b,
8b61618d35, 7887b8f760, 1d5788c3e4, a615a82f1f, 6587df60b7, 1511d4cef0, 9ad6dc0125,
e72235e696, 0e1edaed97, 6af18b94b0, 1641438412, 28abea52e7, 4f678621c6, b7cf75c37e,
30b8f4227f, 330ca87457, 6ca0b1d6fc, 33af9cc021, cf11056f98, 71a164c272, beebe501ec,
260ca8151d, 9225369a73, 2b78478272, ee5ad88e2f, 74a14966c6, 38dfed8c1c, d78e4a1396,
80bc02916d, f312679c13, 70ac4351f3, 75cddb43db, 5972093d87, 6c2dec8502, 9e9f7434d1,
888af5f5ef, dc77289d60, d5ec5d9ca8

.github/workflows/toolkit.yml (vendored, 4 changed lines)

@@ -27,7 +27,7 @@ jobs:
       - name: Build the Docker image
         run: echo "FROM oraclelinux:9-slim" > Dockerfile; echo "RUN microdnf -y update" >> Dockerfile; echo "COPY bin/* /usr/bin/" >> Dockerfile; docker build . --file Dockerfile --tag percona-toolkit:${{ github.sha }}
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@0.29.0
+        uses: aquasecurity/trivy-action@0.30.0
         with:
           image-ref: 'percona-toolkit:${{ github.sha }}'
           format: 'table'
@@ -36,7 +36,7 @@ jobs:
           vuln-type: 'os,library'
           severity: 'CRITICAL,HIGH'
       - name: Upload a Build Artifact
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: binaries
           path: bin/*

@@ -1,5 +1,11 @@
 Changelog for Percona Toolkit
 
+v3.7.0-1 released 2025-05-14
+
+This release addresses multiple security vulnerabilities reported in Percona Toolkit version 3.7.0, including issues related to the `libxml2` component (CVE-2024-56171, CVE-2025-24928), `openssl` (CVE-2024-12797), and `krb5` (CVE-2022-37967).
+
+* Fixed bug PT-2442: percona-toolkit:latest Vulnerability [CVE-2024-56171 CVE-2024-12797 CVE-2022-37967 CVE-2025-24928]
+
 v3.7.0 released 2024-12-23
 
 * Feature PT-2340: Support MySQL 8.4

@@ -11,7 +11,7 @@ MAKE_GOTOOLS
 
 WriteMakefile(
     NAME      => 'Percona::Toolkit',
-    VERSION   => '3.7.0',
+    VERSION   => '3.7.0-1',
     EXE_FILES => [
         map {
             (my $name = $_) =~ s/^bin.//;

@@ -6969,7 +6969,7 @@ sub main {
       warn "Invalid output format:". $o->get('format');
       warn "Using default 'dump' format";
    } elsif ($o->get('output-format') || '' eq 'csv') {
-      $fields_separated_by = ", ";
+      $fields_separated_by = ",";
       $optionally_enclosed_by = '"';
    }
    my $need_hdr = $o->get('header') && !-f $archive_file;
@@ -7511,7 +7511,7 @@ sub escape {
      s/([\t\n\\])/\\$1/g if defined $_;  # Escape tabs etc
      my $s = defined $_ ? $_ : '\N';     # NULL = \N
      # var & ~var will return 0 only for numbers
-     if ($s !~ /^[0-9,.E]+$/ && $optionally_enclosed_by eq '"') {
+     if ($s !~ /^[0-9,.E]+$/ && $optionally_enclosed_by eq '"' && $s ne '\N') {
         $s =~ s/([^\\])"/$1\\"/g;
         $s = $optionally_enclosed_by."$s".$optionally_enclosed_by;
     }

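The two hunks above tighten pt-archiver's CSV output: fields are joined with "," instead of ", ", and the NULL marker \N is never quoted. A minimal Go sketch of the resulting quoting rule (the function name and the simplified quote-escaping are mine, not the tool's code):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Values matching this pattern stay bare, mirroring the Perl test
// $s !~ /^[0-9,.E]+$/ in escape().
var bareRe = regexp.MustCompile(`^[0-9,.E]+$`)

// escapeCSVField sketches the patched rule: numbers and the NULL
// marker \N stay unquoted; anything else has embedded quotes escaped
// (simplified here to every quote) and is wrapped in double quotes.
func escapeCSVField(s string) string {
	if bareRe.MatchString(s) || s == `\N` {
		return s
	}
	return `"` + strings.ReplaceAll(s, `"`, `\"`) + `"`
}

func main() {
	row := []string{"5", "2", "3", `Zapp "Brannigan`, `\N`}
	for i, f := range row {
		row[i] = escapeCSVField(f)
	}
	// Prints: 5,2,3,"Zapp \"Brannigan",\N
	fmt.Println(strings.Join(row, ","))
}
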
@@ -8943,12 +8943,20 @@ sub main {
       $sth->finish();
       PTDEBUG && _d('Last chunk:', Dumper($last_chunk));
 
-      if ( !$last_chunk || !$last_chunk->{new_table_name} ) {
+      if ( !$last_chunk ) {
          $oktorun = undef;
          _die("Option --resume refers non-existing job ID: ${old_job_id}. Exiting."
             , UNSUPPORTED_OPERATION);
       }
 
+      if ( !$last_chunk->{new_table_name}
+           || !$last_chunk->{lower_boundary}
+           || !$last_chunk->{upper_boundary} ) {
+         $oktorun = undef;
+         _die("Option --resume refers job ${old_job_id} with empty boundaries. Exiting."
+            , UNSUPPORTED_OPERATION);
+      }
+
       if ( $last_chunk->{db} ne $db
            || $last_chunk->{tbl} ne $tbl
            || $last_chunk->{altr} ne $o->get('alter') ){
@@ -9607,11 +9615,16 @@ sub main {
    #    ''
    #    doesn't match '(?-xism:Failed to find a unique new table name)'
 
    # (*) Frank: commented them out because it caused infinite loop
    #     and the mentioned test error doesn't arise
 
    my $original_error = $EVAL_ERROR;
-   my $original_error_code = $?;
+   my $original_error_code;
+   if ( $? ) {
+      $original_error_code = $?;
+   }
+   else {
+      $original_error_code = $!;
+   }
 
    $SIG{__DIE__} = 'DEFAULT';
 
    foreach my $task ( reverse @cleanup_tasks ) {
       eval {
@@ -9797,9 +9810,9 @@ sub main {
 
    if ( $o->get('history') ) {
       my $sth = $cxn->dbh()->prepare(
-         "UPDATE ${hist_table} SET new_table_name = ?"
+         "UPDATE ${hist_table} SET new_table_name = ? WHERE job_id = ?"
      );
-      $sth->execute($new_tbl->{tbl});
+      $sth->execute($new_tbl->{tbl}, $job_id);
    }
 
    # If the new table still exists, drop it unless the tool was interrupted.
@@ -9912,7 +9925,7 @@ sub main {
       );
    }
 
-   if ( my $alter = $o->get('alter') ) {
+   if ( (my $alter = $o->get('alter')) && !$o->get('resume') ) {
       print "Altering new table...\n";
       my $sql = "ALTER TABLE $new_tbl->{name} $alter";
       print $sql, "\n" if $o->get('print');
@@ -9921,10 +9934,12 @@ sub main {
          $cxn->dbh()->do($sql);
       };
       if ( $EVAL_ERROR ) {
-         if ( $plugin && $plugin->can('before_die') ) {
-            $plugin->before_die(exit_status => $EVAL_ERROR);
-         }
+         if ( $plugin && $plugin->can('before_die') ) {
+            $plugin->before_die(exit_status => $EVAL_ERROR);
+         }
+         # this is trapped by a signal handler. Don't replace it with _die
+         # we need to override $SIG{__DIE__} to return correct error code
+         $SIG{__DIE__} = sub { print(STDERR "$_[0]"); exit ERROR_ALTERING_TABLE; };
          die "Error altering new table $new_tbl->{name}: $EVAL_ERROR\n";
       }
       print "Altered $new_tbl->{name} OK.\n";

@@ -582,8 +582,8 @@ OS_NAME=
 ARCH=
 OS=
 INSTALL=0
-RPM_RELEASE=1
-DEB_RELEASE=1
+RPM_RELEASE=2
+DEB_RELEASE=2
 REVISION=0
 GIT_BRANCH=${GIT_BRANCH}
 GIT_REPO=https://github.com/percona/percona-toolkit.git

@@ -50,7 +50,7 @@ copyright = u'2024, Percona LLC and/or its affiliates'
 # The short X.Y version.
 version = '3.7'
 # The full version, including alpha/beta/rc tags.
-release = '3.7.0'
+release = '3.7.0-1'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

@@ -19,7 +19,14 @@ We’re always excited to connect and improve everyone's experience.
 Work with a Percona Expert
 ==============================
 
-`Percona experts <https://www.percona.com/services/consulting>`_ bring years of experience in tackling tough database performance issues and design challenges. We understand your challenges when managing complex database environments. That's why we offer various services to help you simplify your operations and achieve your goals.
+Percona experts bring years of experience in tackling tough database performance issues and design challenges.
+
+.. raw:: html
+
+   <div data-tf-live="01JKGY9435F75X6DHG92DJZB26"></div>
+   <script src="//embed.typeform.com/next/embed.js"></script>
+
+We understand your challenges when managing complex database environments. That's why we offer various services to help you simplify your operations and achieve your goals.
 
 +----------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------+
 | Service                    | Description                                                                                                                                               |

@@ -1,6 +1,16 @@
 Release Notes
 ***************
 
+v3.7.0-1 released 2025-05-14
+==============================
+
+This release addresses multiple security vulnerabilities reported in Percona Toolkit version 3.7.0, including issues related to the `libxml2` component (CVE-2024-56171, CVE-2025-24928), `openssl` (CVE-2024-12797), and `krb5` (CVE-2022-37967).
+
+Bug Fixed
+------------
+
+* :jirabug:`PT-2442`: percona-toolkit:latest Vulnerability [CVE-2024-56171 CVE-2024-12797 CVE-2022-37967 CVE-2025-24928]
+
 v3.7.0 released 2024-12-23
 ==============================

go.mod (26 changed lines)

@@ -2,16 +2,18 @@ module github.com/percona/percona-toolkit
 
 go 1.23.4
 
+toolchain go1.24.1
+
 require (
 	github.com/AlekSi/pointer v1.2.0
 	github.com/Ladicle/tabwriter v1.0.0
 	github.com/Masterminds/semver v1.5.0
 	github.com/alecthomas/kingpin v2.2.6+incompatible
-	github.com/alecthomas/kong v1.6.1
+	github.com/alecthomas/kong v1.9.0
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 	github.com/go-ini/ini v1.67.0
 	github.com/golang/mock v1.6.0
-	github.com/google/go-cmp v0.6.0
+	github.com/google/go-cmp v0.7.0
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/go-version v1.7.0
 	github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef
@@ -21,17 +23,17 @@ require (
 	github.com/pborman/getopt v1.1.0
 	github.com/percona/go-mysql v0.0.0-20210427141028-73d29c6da78c
 	github.com/pkg/errors v0.9.1
-	github.com/rs/zerolog v1.33.0
+	github.com/rs/zerolog v1.34.0
 	github.com/shirou/gopsutil v3.21.11+incompatible
 	github.com/sirupsen/logrus v1.9.3
 	github.com/stretchr/testify v1.10.0
 	github.com/xlab/treeprint v1.2.0
-	go.mongodb.org/mongo-driver v1.17.2
-	golang.org/x/crypto v0.32.0
+	go.mongodb.org/mongo-driver v1.17.3
+	golang.org/x/crypto v0.36.0
 	golang.org/x/exp v0.0.0-20230321023759-10a507213a29
 	gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.32.1
+	k8s.io/api v0.32.3
 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 )
 
@@ -60,14 +62,14 @@ require (
 	github.com/xdg-go/stringprep v1.0.4 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
-	golang.org/x/net v0.33.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.29.0 // indirect
-	golang.org/x/term v0.28.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
+	golang.org/x/net v0.36.0 // indirect
+	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
+	golang.org/x/text v0.23.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apimachinery v0.32.1 // indirect
+	k8s.io/apimachinery v0.32.3 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect

go.sum (50 changed lines)

@@ -8,8 +8,8 @@ github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8v
 github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
 github.com/alecthomas/kingpin v2.2.6+incompatible h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI=
 github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
-github.com/alecthomas/kong v1.6.1 h1:/7bVimARU3uxPD0hbryPE8qWrS3Oz3kPQoxA/H2NKG8=
-github.com/alecthomas/kong v1.6.1/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
+github.com/alecthomas/kong v1.9.0 h1:Wgg0ll5Ys7xDnpgYBuBn/wPeLGAuK0NvYmEcisJgrIs=
+github.com/alecthomas/kong v1.9.0/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU=
 github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
 github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
@@ -38,8 +38,8 @@ github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+Licev
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -88,9 +88,9 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
-github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
+github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
+github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
+github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
 github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
@@ -125,14 +125,14 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
 github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
-go.mongodb.org/mongo-driver v1.17.2 h1:gvZyk8352qSfzyZ2UMWcpDpMSGEr1eqE4T793SqyhzM=
-go.mongodb.org/mongo-driver v1.17.2/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
+go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
+go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
-golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
 golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
 golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -146,15 +146,15 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
+golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -170,18 +170,18 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
-golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -205,10 +205,10 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
-k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
-k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
-k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
+k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
+k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
+k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=

@@ -14,8 +14,8 @@ sphinxcontrib-srclinks
 sphinx-tabs
 
 certifi>=2024.7.4 # not directly required, pinned by Snyk to avoid a vulnerability
-jinja2>=3.1.4 # not directly required, pinned by Snyk to avoid a vulnerability
+jinja2>=3.1.6 # not directly required, pinned by Snyk to avoid a vulnerability
 pygments>=2.15.0 # not directly required, pinned by Snyk to avoid a vulnerability
 requests>=2.31.0 # not directly required, pinned by Snyk to avoid a vulnerability
-setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
+setuptools>=70.0.0 # not directly required, pinned by Snyk to avoid a vulnerability
 idna>=3.7 # not directly required, pinned by Snyk to avoid a vulnerability

@@ -10,11 +10,12 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/percona/percona-toolkit/src/go/lib/tutil"
-	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.mongodb.org/mongo-driver/bson"
+
+	"github.com/percona/percona-toolkit/src/go/lib/tutil"
+	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
 )
 
 const (
@@ -33,7 +34,8 @@ func TestMain(m *testing.M) {
 		log.Printf("cannot get root path: %s", err.Error())
 		os.Exit(1)
 	}
-	os.Exit(m.Run())
+	code := m.Run()
+	os.Exit(code)
 }
 
 func TestSingleFingerprint(t *testing.T) {

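The same TestMain reshaping recurs across the Go test files in this range: the exit code is captured instead of being passed straight to os.Exit. A minimal sketch of why that shape is useful (the package name is hypothetical): os.Exit terminates the process immediately and skips deferred functions, so any teardown has to run between m.Run() and the exit.

package mongolib_test // hypothetical package name, for illustration only

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	// setup could go here
	code := m.Run()
	// teardown must happen here: os.Exit would skip deferred cleanup
	os.Exit(code)
}
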
@@ -7,14 +7,15 @@ import (
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+
 	tu "github.com/percona/percona-toolkit/src/go/internal/testutils"
 	"github.com/percona/percona-toolkit/src/go/lib/tutil"
 	"github.com/percona/percona-toolkit/src/go/mongolib/fingerprinter"
 	"github.com/percona/percona-toolkit/src/go/mongolib/stats"
 	"github.com/percona/percona-toolkit/src/go/pt-mongodb-query-digest/filter"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"go.mongodb.org/mongo-driver/bson/primitive"
 )
 
 const (
@@ -38,7 +39,8 @@ func TestMain(m *testing.M) {
 		log.Printf("cannot get root path: %s", err.Error())
 		os.Exit(1)
 	}
-	os.Exit(m.Run())
+	code := m.Run()
+	os.Exit(code)
 }
 
 func TestRegularIterator(t *testing.T) {

@@ -13,8 +13,6 @@ type SystemProfile struct {
 	AllUsers        []interface{} `bson:"allUsers"`
 	Client          string        `bson:"client"`
 	CursorExhausted bool          `bson:"cursorExhausted"`
-	DocsExamined    int           `bson:"docsExamined"`
-	NscannedObjects int           `bson:"nscannedObjects"`
 	ExecStats       struct {
 		Advanced                    int `bson:"advanced"`
 		ExecutionTimeMillisEstimate int `bson:"executionTimeMillisEstimate"`
@@ -48,28 +46,37 @@ type SystemProfile struct {
 		SaveState    int    `bson:"saveState"`
 		Stage        string `bson:"stage"`
 		Works        int    `bson:"works"`
+		DocsExamined int    `bson:"docsExamined"`
 	} `bson:"execStats"`
 	KeyUpdates   int `bson:"keyUpdates"`
 	KeysExamined int `bson:"keysExamined"`
 	Locks        struct {
 		Collection struct {
 			AcquireCount struct {
-				R int `bson:"R"`
+				Read       int `bson:"R"`
+				ReadShared int `bson:"r"`
 			} `bson:"acquireCount"`
 		} `bson:"Collection"`
 		Database struct {
 			AcquireCount struct {
-				R int `bson:"r"`
+				ReadShared int `bson:"r"`
 			} `bson:"acquireCount"`
+			AcquireWaitCount struct {
+				ReadShared int `bson:"r"`
+			} `bson:"acquireWaitCount"`
+			TimeAcquiringMicros struct {
+				ReadShared int64 `bson:"r"`
+			} `bson:"timeAcquiringMicros"`
 		} `bson:"Database"`
 		Global struct {
 			AcquireCount struct {
-				R int `bson:"r"`
+				ReadShared  int `bson:"r"`
+				WriteShared int `bson:"w"`
 			} `bson:"acquireCount"`
 		} `bson:"Global"`
 		MMAPV1Journal struct {
 			AcquireCount struct {
-				R int `bson:"r"`
+				ReadShared int `bson:"r"`
 			} `bson:"acquireCount"`
 		} `bson:"MMAPV1Journal"`
 	} `bson:"locks"`
@@ -78,6 +85,7 @@ type SystemProfile struct {
 	Ns          string `bson:"ns"`
 	NumYield    int    `bson:"numYield"`
 	Op          string `bson:"op"`
+	PlanSummary string `bson:"planSummary"`
 	Protocol    string `bson:"protocol"`
 	Query       bson.D `bson:"query"`
 	UpdateObj   bson.D `bson:"updateobj"`
@@ -87,6 +95,16 @@ type SystemProfile struct {
 	Ts             time.Time `bson:"ts"`
 	User           string    `bson:"user"`
 	WriteConflicts int       `bson:"writeConflicts"`
+	DocsExamined   int       `bson:"docsExamined"`
+	QueryHash      string    `bson:"queryHash"`
+	Storage        struct {
+		Data struct {
+			BytesRead         int64 `bson:"bytesRead"`
+			TimeReadingMicros int64 `bson:"timeReadingMicros"`
+		} `bson:"data"`
+	} `bson:"storage"`
+	AppName  string `bson:"appName"`
+	Comments string `bson:"comments"`
 }
 
 func NewExampleQuery(doc SystemProfile) ExampleQuery {

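The field renames in SystemProfile above (R becoming Read or ReadShared) only change the Go-side names; the bson tags still bind to the single-letter keys the MongoDB profiler emits, and "R" and "r" are distinct document keys. A small self-contained check of that mapping (the struct and values here are illustrative, not the project's code):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// acquireCount mirrors the renamed fields: the tag, not the Go name,
// decides which profiler key each field receives.
type acquireCount struct {
	Read       int `bson:"R"` // shared lock acquisitions
	ReadShared int `bson:"r"` // intent-shared lock acquisitions
}

func main() {
	raw, err := bson.Marshal(bson.M{"R": 2, "r": 5})
	if err != nil {
		panic(err)
	}
	var ac acquireCount
	if err := bson.Unmarshal(raw, &ac); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", ac) // {Read:2 ReadShared:5}
}
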
@@ -4,6 +4,7 @@ import (
 	"crypto/md5"
 	"fmt"
 	"sort"
+	"strings"
 	"sync"
 	"time"
 
@@ -13,6 +14,10 @@ import (
 	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
 )
 
+const (
+	planSummaryCollScan = "COLLSCAN"
+)
+
 type StatsError struct {
 	error
 }
@@ -86,18 +91,23 @@ func (s *Stats) Add(doc proto.SystemProfile) error {
 			Namespace:   fp.Namespace,
 			TableScan:   false,
 			Query:       string(queryBson),
+			PlanSummary: doc.PlanSummary,
+			QueryHash:   doc.QueryHash,
+			AppName:     doc.AppName,
+			Client:      doc.Client,
+			User:        strings.Split(doc.User, "@")[0],
+			Comments:    doc.Comments,
 		}
 		s.setQueryInfoAndCounters(key, qiac)
 	}
 	qiac.Count++
-	// docsExamined is renamed from nscannedObjects in 3.2.0.
-	// https://docs.mongodb.com/manual/reference/database-profiler/#system.profile.docsExamined
 	s.Lock()
-	if doc.NscannedObjects > 0 {
-		qiac.NScanned = append(qiac.NScanned, float64(doc.NscannedObjects))
-	} else {
-		qiac.NScanned = append(qiac.NScanned, float64(doc.DocsExamined))
-	}
+	if qiac.PlanSummary == planSummaryCollScan {
+		qiac.CollScanCount++
+	}
+
+	qiac.PlanSummary = strings.Split(qiac.PlanSummary, " ")[0]
+
 	qiac.NReturned = append(qiac.NReturned, float64(doc.Nreturned))
 	qiac.QueryTime = append(qiac.QueryTime, float64(doc.Millis))
 	qiac.ResponseLength = append(qiac.ResponseLength, float64(doc.ResponseLength))
@@ -107,6 +117,42 @@ func (s *Stats) Add(doc proto.SystemProfile) error {
 	if qiac.LastSeen.IsZero() || qiac.LastSeen.Before(doc.Ts) {
 		qiac.LastSeen = doc.Ts
 	}
+
+	if doc.DocsExamined > 0 {
+		qiac.DocsExamined = append(qiac.DocsExamined, float64(doc.DocsExamined))
+	}
+	if doc.KeysExamined > 0 {
+		qiac.KeysExamined = append(qiac.KeysExamined, float64(doc.KeysExamined))
+	}
+	if doc.Locks.Global.AcquireCount.ReadShared > 0 {
+		qiac.LocksGlobalAcquireCountReadSharedCount++
+		qiac.LocksGlobalAcquireCountReadShared += doc.Locks.Global.AcquireCount.ReadShared
+	}
+	if doc.Locks.Global.AcquireCount.WriteShared > 0 {
+		qiac.LocksGlobalAcquireCountWriteSharedCount++
+		qiac.LocksGlobalAcquireCountWriteShared += doc.Locks.Global.AcquireCount.WriteShared
+	}
+	if doc.Locks.Database.AcquireCount.ReadShared > 0 {
+		qiac.LocksDatabaseAcquireCountReadSharedCount++
+		qiac.LocksDatabaseAcquireCountReadShared += doc.Locks.Database.AcquireCount.ReadShared
+	}
+	if doc.Locks.Database.AcquireWaitCount.ReadShared > 0 {
+		qiac.LocksDatabaseAcquireWaitCountReadSharedCount++
+		qiac.LocksDatabaseAcquireWaitCountReadShared += doc.Locks.Database.AcquireWaitCount.ReadShared
+	}
+	if doc.Locks.Database.TimeAcquiringMicros.ReadShared > 0 {
+		qiac.LocksDatabaseTimeAcquiringMicrosReadShared = append(qiac.LocksDatabaseTimeAcquiringMicrosReadShared, float64(doc.Locks.Database.TimeAcquiringMicros.ReadShared))
+	}
+	if doc.Locks.Collection.AcquireCount.ReadShared > 0 {
+		qiac.LocksCollectionAcquireCountReadSharedCount++
+		qiac.LocksCollectionAcquireCountReadShared += doc.Locks.Collection.AcquireCount.ReadShared
+	}
+	if doc.Storage.Data.BytesRead > 0 {
+		qiac.StorageBytesRead = append(qiac.StorageBytesRead, float64(doc.Storage.Data.BytesRead))
+	}
+	if doc.Storage.Data.TimeReadingMicros > 0 {
+		qiac.StorageTimeReadingMicros = append(qiac.StorageTimeReadingMicros, float64(doc.Storage.Data.TimeReadingMicros))
+	}
 	s.Unlock()
 
 	return nil
@@ -185,9 +231,34 @@ type QueryInfoAndCounters struct {
 	BlockedTime    Times
 	LockTime       Times
 	NReturned      []float64
-	NScanned       []float64
 	QueryTime      []float64 // in milliseconds
 	ResponseLength []float64
+
+	PlanSummary   string
+	CollScanCount int
+
+	DocsExamined []float64
+	KeysExamined []float64
+	QueryHash    string
+	AppName      string
+	Client       string
+	User         string
+	Comments     string
+
+	LocksGlobalAcquireCountReadSharedCount       int
+	LocksGlobalAcquireCountReadShared            int
+	LocksGlobalAcquireCountWriteSharedCount      int
+	LocksGlobalAcquireCountWriteShared           int
+	LocksDatabaseAcquireCountReadSharedCount     int
+	LocksDatabaseAcquireCountReadShared          int
+	LocksDatabaseAcquireWaitCountReadSharedCount int
+	LocksDatabaseAcquireWaitCountReadShared      int
+	LocksDatabaseTimeAcquiringMicrosReadShared   []float64 // in microseconds
+	LocksCollectionAcquireCountReadSharedCount   int
+	LocksCollectionAcquireCountReadShared        int
+
+	StorageBytesRead         []float64
+	StorageTimeReadingMicros []float64 // in microseconds
 }
 
 // times is an array of time.Time that implements the Sorter interface
@@ -214,11 +285,15 @@ func (g GroupKey) String() string {
 }
 
 type totalCounters struct {
-	Count     int
-	Scanned   float64
-	Returned  float64
-	QueryTime float64
-	Bytes     float64
+	Count        int
+	Returned     float64
+	QueryTime    float64
+	Bytes        float64
+	DocsExamined float64
+	KeysExamined float64
+	LocksDatabaseTimeAcquiringMicrosReadShared float64
+	StorageBytesRead         float64
+	StorageTimeReadingMicros float64
 }
 
 type QueryStats struct {
@@ -230,14 +305,44 @@ type QueryStats struct {
 	FirstSeen time.Time
 	LastSeen  time.Time
 
-	Count          int
-	QPS            float64
-	Rank           int
-	Ratio          float64
-	QueryTime      Statistics
-	ResponseLength Statistics
-	Returned       Statistics
-	Scanned        Statistics
+	Count               int
+	QPS                 float64
+	Rank                int
+	Ratio               float64
+	QueryTime           Statistics
+	ResponseLengthCount int
+	ResponseLength      Statistics
+	Returned            Statistics
+
+	PlanSummary       string
+	CollScanCount     int
+	DocsExaminedCount int
+	DocsExamined      Statistics
+	KeysExaminedCount int
+	KeysExamined      Statistics
+	QueryHash         string
+	AppName           string
+	Client            string
+	User              string
+	Comments          string
+
+	LocksGlobalAcquireCountReadSharedCount          int
+	LocksGlobalAcquireCountReadShared               int
+	LocksGlobalAcquireCountWriteSharedCount         int
+	LocksGlobalAcquireCountWriteShared              int
+	LocksDatabaseAcquireCountReadSharedCount        int
+	LocksDatabaseAcquireCountReadShared             int
+	LocksDatabaseAcquireWaitCountReadSharedCount    int
+	LocksDatabaseAcquireWaitCountReadShared         int
+	LocksDatabaseTimeAcquiringMicrosReadSharedCount int
+	LocksDatabaseTimeAcquiringMicrosReadShared      Statistics // in microseconds
+	LocksCollectionAcquireCountReadSharedCount      int
+	LocksCollectionAcquireCountReadShared           int
+
+	StorageBytesReadCount         int
+	StorageBytesRead              Statistics
+	StorageTimeReadingMicrosCount int
+	StorageTimeReadingMicros      Statistics // in microseconds
 }
 
 type Statistics struct {
@@ -254,22 +359,46 @@ type Statistics struct {
 
 func countersToStats(query QueryInfoAndCounters, uptime int64, tc totalCounters) QueryStats {
 	queryStats := QueryStats{
-		Count:          query.Count,
-		ID:             query.ID,
-		Operation:      query.Operation,
-		Query:          query.Query,
-		Fingerprint:    query.Fingerprint,
-		Scanned:        calcStats(query.NScanned),
-		Returned:       calcStats(query.NReturned),
-		QueryTime:      calcStats(query.QueryTime),
-		ResponseLength: calcStats(query.ResponseLength),
-		FirstSeen:      query.FirstSeen,
-		LastSeen:       query.LastSeen,
-		Namespace:      query.Namespace,
-		QPS:            float64(query.Count) / float64(uptime),
-	}
-	if tc.Scanned > 0 {
-		queryStats.Scanned.Pct = queryStats.Scanned.Total * 100 / tc.Scanned
+		Count:               query.Count,
+		ID:                  query.ID,
+		Operation:           query.Operation,
+		Query:               query.Query,
+		Fingerprint:         query.Fingerprint,
+		Returned:            calcStats(query.NReturned),
+		QueryTime:           calcStats(query.QueryTime),
+		FirstSeen:           query.FirstSeen,
+		LastSeen:            query.LastSeen,
+		Namespace:           query.Namespace,
+		QPS:                 float64(query.Count) / float64(uptime),
+		PlanSummary:         query.PlanSummary,
+		CollScanCount:       query.CollScanCount,
+		ResponseLengthCount: len(query.ResponseLength),
+		ResponseLength:      calcStats(query.ResponseLength),
+		DocsExaminedCount:   len(query.DocsExamined),
+		DocsExamined:        calcStats(query.DocsExamined),
+		KeysExaminedCount:   len(query.KeysExamined),
+		KeysExamined:        calcStats(query.KeysExamined),
+		QueryHash:           query.QueryHash,
+		AppName:             query.AppName,
+		Client:              query.Client,
+		User:                query.User,
+		Comments:            query.Comments,
+		LocksGlobalAcquireCountReadSharedCount:          query.LocksGlobalAcquireCountReadSharedCount,
+		LocksGlobalAcquireCountReadShared:               query.LocksGlobalAcquireCountReadShared,
+		LocksGlobalAcquireCountWriteSharedCount:         query.LocksGlobalAcquireCountWriteSharedCount,
+		LocksGlobalAcquireCountWriteShared:              query.LocksGlobalAcquireCountWriteShared,
+		LocksDatabaseAcquireCountReadSharedCount:        query.LocksDatabaseAcquireCountReadSharedCount,
+		LocksDatabaseAcquireCountReadShared:             query.LocksDatabaseAcquireCountReadShared,
+		LocksDatabaseAcquireWaitCountReadSharedCount:    query.LocksDatabaseAcquireWaitCountReadSharedCount,
+		LocksDatabaseAcquireWaitCountReadShared:         query.LocksDatabaseAcquireWaitCountReadShared,
+		LocksDatabaseTimeAcquiringMicrosReadSharedCount: len(query.LocksDatabaseTimeAcquiringMicrosReadShared),
+		LocksDatabaseTimeAcquiringMicrosReadShared:      calcStats(query.LocksDatabaseTimeAcquiringMicrosReadShared),
+		LocksCollectionAcquireCountReadSharedCount:      query.LocksCollectionAcquireCountReadSharedCount,
+		LocksCollectionAcquireCountReadShared:           query.LocksCollectionAcquireCountReadShared,
+		StorageBytesReadCount:         len(query.StorageBytesRead),
+		StorageBytesRead:              calcStats(query.StorageBytesRead),
+		StorageTimeReadingMicrosCount: len(query.StorageTimeReadingMicros),
+		StorageTimeReadingMicros:      calcStats(query.StorageTimeReadingMicros),
 	}
 	if tc.Returned > 0 {
 		queryStats.Returned.Pct = queryStats.Returned.Total * 100 / tc.Returned
@@ -281,7 +410,22 @@ func countersToStats(query QueryInfoAndCounters, uptime int64, tc totalCounters)
 		queryStats.ResponseLength.Pct = queryStats.ResponseLength.Total * 100 / tc.Bytes
 	}
 	if queryStats.Returned.Total > 0 {
-		queryStats.Ratio = queryStats.Scanned.Total / queryStats.Returned.Total
+		queryStats.Ratio = queryStats.DocsExamined.Total / queryStats.Returned.Total
+	}
+	if tc.DocsExamined > 0 {
+		queryStats.DocsExamined.Pct = queryStats.DocsExamined.Total * 100 / tc.DocsExamined
+	}
+	if tc.KeysExamined > 0 {
+		queryStats.KeysExamined.Pct = queryStats.KeysExamined.Total * 100 / tc.KeysExamined
+	}
+	if tc.LocksDatabaseTimeAcquiringMicrosReadShared > 0 {
+		queryStats.LocksDatabaseTimeAcquiringMicrosReadShared.Pct = queryStats.LocksDatabaseTimeAcquiringMicrosReadShared.Total * 100 / tc.LocksDatabaseTimeAcquiringMicrosReadShared
+	}
+	if tc.StorageBytesRead > 0 {
+		queryStats.StorageBytesRead.Pct = queryStats.StorageBytesRead.Total * 100 / tc.StorageBytesRead
+	}
+	if tc.StorageTimeReadingMicros > 0 {
+		queryStats.StorageTimeReadingMicros.Pct = queryStats.StorageTimeReadingMicros.Total * 100 / tc.StorageTimeReadingMicros
 	}
 
 	return queryStats
@@ -291,10 +435,14 @@ func aggregateCounters(queries []QueryInfoAndCounters) QueryInfoAndCounters {
 	qt := QueryInfoAndCounters{}
 	for _, query := range queries {
 		qt.Count += query.Count
-		qt.NScanned = append(qt.NScanned, query.NScanned...)
 		qt.NReturned = append(qt.NReturned, query.NReturned...)
 		qt.QueryTime = append(qt.QueryTime, query.QueryTime...)
 		qt.ResponseLength = append(qt.ResponseLength, query.ResponseLength...)
+		qt.DocsExamined = append(qt.DocsExamined, query.DocsExamined...)
+		qt.KeysExamined = append(qt.KeysExamined, query.KeysExamined...)
+		qt.LocksDatabaseTimeAcquiringMicrosReadShared = append(qt.LocksDatabaseTimeAcquiringMicrosReadShared, query.LocksDatabaseTimeAcquiringMicrosReadShared...)
+		qt.StorageBytesRead = append(qt.StorageBytesRead, query.StorageBytesRead...)
+		qt.StorageTimeReadingMicros = append(qt.StorageTimeReadingMicros, query.StorageTimeReadingMicros...)
 	}
 	return qt
 }
@@ -305,9 +453,6 @@ func calcTotalCounters(queries []QueryInfoAndCounters) totalCounters {
 	for _, query := range queries {
 		tc.Count += query.Count
 
-		scanned, _ := stats.Sum(query.NScanned)
-		tc.Scanned += scanned
-
 		returned, _ := stats.Sum(query.NReturned)
 		tc.Returned += returned
 
@@ -316,11 +461,30 @@ func calcTotalCounters(queries []QueryInfoAndCounters) totalCounters {
 
 		bytes, _ := stats.Sum(query.ResponseLength)
 		tc.Bytes += bytes
+
+		docsExamined, _ := stats.Sum(query.DocsExamined)
+		tc.DocsExamined += docsExamined
+
+		keysExamined, _ := stats.Sum(query.KeysExamined)
+		tc.KeysExamined += keysExamined
+
+		locksDatabaseTimeAcquiringMicrosReadShared, _ := stats.Sum(query.LocksDatabaseTimeAcquiringMicrosReadShared)
+		tc.LocksDatabaseTimeAcquiringMicrosReadShared += locksDatabaseTimeAcquiringMicrosReadShared
+
+		storageBytesRead, _ := stats.Sum(query.StorageBytesRead)
+		tc.StorageBytesRead += storageBytesRead
+
+		storageTimeReadingMicros, _ := stats.Sum(query.StorageTimeReadingMicros)
+		tc.StorageTimeReadingMicros += storageTimeReadingMicros
 	}
 	return tc
 }
 
 func calcStats(samples []float64) Statistics {
+	if len(samples) == 0 {
+		return Statistics{}
+	}
+
 	var s Statistics
 	s.Total, _ = stats.Sum(samples)
 	s.Min, _ = stats.Min(samples)

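Every new []float64 counter introduced above (DocsExamined, KeysExamined, StorageBytesRead, and so on) flows through the same calcStats reduction, with the new zero-length guard returning an empty Statistics. A self-contained sketch of that reduction without the stats dependency (the Statistics fields shown are a subset, and the median is a simple midpoint rather than an interpolated percentile):

package main

import (
	"fmt"
	"sort"
)

// Statistics is a subset of the struct in stats.go, for illustration.
type Statistics struct {
	Total, Min, Max, Avg, Median float64
	Pct                          float64 // later: Total * 100 / grand total
}

func calcStats(samples []float64) Statistics {
	if len(samples) == 0 {
		return Statistics{} // the guard the diff adds
	}
	sorted := append([]float64(nil), samples...)
	sort.Float64s(sorted)
	s := Statistics{Min: sorted[0], Max: sorted[len(sorted)-1]}
	for _, v := range sorted {
		s.Total += v
	}
	s.Avg = s.Total / float64(len(sorted))
	s.Median = sorted[len(sorted)/2] // midpoint, not interpolated
	return s
}

func main() {
	docsExamined := []float64{75, 75, 75}
	// Prints: {Total:225 Min:75 Max:75 Avg:75 Median:75 Pct:0}
	fmt.Printf("%+v\n", calcStats(docsExamined))
}
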
@@ -14,6 +14,7 @@ import (
 	"time"
 
 	"github.com/golang/mock/gomock"
+
 	"github.com/percona/percona-toolkit/src/go/lib/tutil"
 	"github.com/percona/percona-toolkit/src/go/mongolib/fingerprinter"
 	"github.com/percona/percona-toolkit/src/go/mongolib/proto"
@@ -40,8 +41,8 @@ func TestMain(m *testing.M) {
 		log.Printf("cannot get root path: %s", err.Error())
 		os.Exit(1)
 	}
-	// TODO: Review with the new sandbox
-	// os.Exit(m.Run())
+	code := m.Run()
+	os.Exit(code)
 }
 
 func TestTimesLen(t *testing.T) {
@@ -158,9 +159,9 @@ func TestStats(t *testing.T) {
 		BlockedTime:    nil,
 		LockTime:       nil,
 		NReturned:      []float64{0},
-		NScanned:       []float64{10000},
 		QueryTime:      []float64{7},
 		ResponseLength: []float64{215},
+		DocsExamined:   []float64{10000},
 	}
 
 	want := Queries{

@@ -54,7 +54,7 @@ Options
 
 ``-o``, ``--order-by``
    Specifies the sorting order using fields:
-   ``count``, ``ratio``, ``query-time``, ``docs-scanned``, ``docs-returned``.
+   ``count``, ``ratio``, ``query-time``, ``docs-examined``, ``docs-returned``.
 
    Adding a hyphen (``-``) in front of a field denotes reverse order.
    For example: ``--order-by="count,-ratio"``.
@@ -94,13 +94,13 @@ Output Example
 .. code-block:: none
 
    # Query 3:  0.06 QPS, ID 0b906bd86148def663d11b402f3e41fa
-   # Ratio    1.00  (docs scanned/returned)
+   # Ratio    1.00  (docs examined/returned)
    # Time range: 2017-02-03 16:01:37.484 -0300 ART to 2017-02-03 16:02:08.43 -0300 ART
    # Attribute            pct     total        min       max       avg       95%    stddev    median
    # ==================   ===   ========   ========  ========  ========  ========  ========  ========
    # Count (docs)                    100
    # Exec Time ms           2          3          0         1         0         0         0         0
-   # Docs Scanned           5      7.50K      75.00     75.00     75.00     75.00      0.00     75.00
+   # Docs Examined          5      7.50K      75.00     75.00     75.00     75.00      0.00     75.00
    # Docs Returned         92      7.50K      75.00     75.00     75.00     75.00      0.00     75.00
    # Bytes recv             1    106.12M      1.06M     1.06M     1.06M     1.06M      0.00     1.06M
    # String:

@@ -497,23 +497,23 @@ func sortQueries(queries []stats.QueryStats, orderby []string) []stats.QueryStat
 		}
 
 	//
-	case "docs-scanned":
+	case "docs-examined":
 		f = func(c1, c2 *stats.QueryStats) bool {
-			return c1.Scanned.Max < c2.Scanned.Max
+			return c1.DocsExamined.Max < c2.DocsExamined.Max
 		}
-	case "-docs-scanned":
+	case "-docs-examined":
 		f = func(c1, c2 *stats.QueryStats) bool {
-			return c1.Scanned.Max > c2.Scanned.Max
+			return c1.DocsExamined.Max > c2.DocsExamined.Max
 		}
 
 	//
 	case "docs-returned":
 		f = func(c1, c2 *stats.QueryStats) bool {
-			return c1.Returned.Max < c2.Scanned.Max
+			return c1.Returned.Max < c2.DocsExamined.Max
 		}
 	case "-docs-returned":
 		f = func(c1, c2 *stats.QueryStats) bool {
-			return c1.Returned.Max > c2.Scanned.Max
+			return c1.Returned.Max > c2.DocsExamined.Max
 		}
 	}
 	// count,query-time,docs-scanned, docs-returned. - in front of the field name denotes reverse order.")

@@ -32,7 +32,8 @@ var logger = logrus.New()
 
 func TestMain(m *testing.M) {
 	logger.SetLevel(logrus.WarnLevel)
-	os.Exit(m.Run())
+	code := m.Run()
+	os.Exit(code)
 }
 
 func TestConnection(t *testing.T) {

@@ -15,7 +15,7 @@ Usage
 
 ::
 
-   pt-secure-data [<flags>] <command> [<args> ...]
+   pt-secure-collect [<flags>] <command> [<args> ...]
 
 By default, :program:`pt-secure-collect` will collect the output of:

@@ -123,11 +123,11 @@ $output = output(
 );
 $output = `cat archive.test.table_2`;
 is($output, <<EOF
-1, 2, 3, 4
-2, "\\N", 3, 4
-3, 2, 3, "\\\t"
-4, 2, 3, "\\\n"
-5, 2, 3, "Zapp \\"Brannigan"
+1,2,3,4
+2,\\N,3,4
+3,2,3,"\\\t"
+4,2,3,"\\\n"
+5,2,3,"Zapp \\"Brannigan"
 EOF
 , '--output-format=csv');
 `rm -f archive.test.table_2`;

t/pt-archiver/pt-2410.t (new file, 75 lines)

@@ -0,0 +1,75 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use charnames ':full';

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-archiver";

my $dp  = new DSNParser(opts=>$dsn_opts);
my $sb  = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('source');

if ( !$dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}

my $output;
my $exit_status;
my $cnf = "/tmp/12345/my.sandbox.cnf";
my $cmd = "$trunk/bin/pt-archiver";

$sb->wipe_clean($dbh);
$sb->create_dbs($dbh, ['test']);

$sb->load_file('source', 't/pt-archiver/samples/pt-2410.sql');

($output, $exit_status) = full_output(
   sub { pt_archiver::main(
      qw(--where 1=1 --output-format=csv),
      '--source', "L=1,D=pt_2410,t=test,F=$cnf",
      '--file', '/tmp/pt-2410.csv') },
);

is(
   $exit_status,
   0,
   'pt-archiver completed'
);

$output = `cat /tmp/pt-2410.csv`;
like(
   $output,
   qr/1,\\N,"testing..."/,
   'NULL values stored correctly'
) or diag($output);

$dbh->do("load data local infile '/tmp/pt-2410.csv' into table pt_2410.test COLUMNS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '\"'");

$output = `/tmp/12345/use pt_2410 -N -e 'SELECT * FROM test'`;

like(
   $output,
   qr/1 NULL testing.../,
   'NULL values loaded correctly'
) or diag($output);

# #############################################################################
# Done.
# #############################################################################
diag(`rm -f /tmp/pt-2410.csv`);
$sb->wipe_clean($dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;
exit;

t/pt-archiver/samples/pt-2410.sql (new file, 10 lines)

@@ -0,0 +1,10 @@
CREATE DATABASE pt_2410;
USE pt_2410;

CREATE TABLE test(
  id int not null primary key auto_increment,
  column1 int default null,
  column2 varchar(50) not null);

INSERT INTO test VALUES (null,null,'testing...');
INSERT INTO test VALUES (null,null,'testing...');

@@ -137,7 +137,7 @@ set_delay();
 # We need to sleep, otherwise pt-osc can finish before replica is delayed
 sleep($max_lag);
 
-my $args = "$source_dsn,D=test,t=pt1717 --execute --chunk-size ${chunk_size} --max-lag $max_lag --alter 'engine=INNODB' --pid $tmp_file_name --progress time,5 --no-drop-new-table --no-drop-triggers --history";
+my $args = "$source_dsn,D=test,t=pt1717 --execute --chunk-size ${chunk_size} --max-lag $max_lag --alter 'ADD COLUMN foo varchar(32)' --pid $tmp_file_name --progress time,5 --no-drop-new-table --no-drop-triggers --history";
 
 $output = run_broken_job($args);
 
@@ -165,7 +165,7 @@ my @args = (qw(--execute --chunk-size=10 --history));
 
 ($output, $exit) = full_output(
    sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=INNODB', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
       '--chunk-index=f2'
    ) }
 );
@@ -186,7 +186,7 @@ like(
    sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
       '--max-lag', $max_lag,
       '--resume', $job_id,
-      '--alter', 'engine=INNODB',
+      '--alter', 'ADD COLUMN foo varchar(32)',
       '--plugin', "$plugin/pt-1717.pm",
    ),
 },
@@ -208,8 +210,10 @@ ok(
    'All rows copied correctly'
 ) or diag("New table checksum: '${new_table_checksum}', original content checksum: '${old_table_checksum}'");
 
+diag(`/tmp/12345/use test -N -e "ALTER TABLE pt1717 DROP COLUMN foo"`);
+
 # Tests for chunk-index and chunk-index-columns options
-$args = "$source_dsn,D=test,t=pt1717 --alter engine=innodb --execute --history --chunk-size=10 --no-drop-new-table --no-drop-triggers --reverse-triggers --chunk-index=f2";
+$args = "$source_dsn,D=test,t=pt1717 --alter 'ADD COLUMN foo varchar(32)' --execute --history --chunk-size=10 --no-drop-new-table --no-drop-triggers --reverse-triggers --chunk-index=f2";
 
 set_delay();
 $output = run_broken_job($args);
@@ -220,7 +222,7 @@ $job_id = $1;
 
 ($output, $exit) = full_output(
    sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
    ) }
 );
 
@@ -238,7 +240,7 @@ like(
 
 ($output, $exit) = full_output(
    sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
       '--chunk-index=f1'
    ) }
 );
@@ -257,7 +259,7 @@ like(
 
 ($output, $exit) = full_output(
    sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
       '--chunk-index=f2', '--chunk-index-columns=1'
    ) }
 );
@@ -288,7 +290,7 @@ is(
    $output + 0,
    3,
    'Triggers were not dropped'
-);
+) or diag($output);
 
 $output = `/tmp/12345/use -N -e "select count(*) from information_schema.triggers where TRIGGER_SCHEMA='test' AND EVENT_OBJECT_TABLE like '%pt1717%_new' AND trigger_name LIKE 'rt_%'"`;
 
@@ -300,7 +302,7 @@ is(
 
 ($output, $exit) = full_output(
    sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
       '--chunk-size=4',
       '--chunk-index=f2'
    ) }
@@ -348,7 +350,7 @@ ok(
 
 ($output, $exit) = full_output(
    sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
       '--chunk-size=4',
       '--chunk-index=f2'
   ) }
@@ -372,7 +374,7 @@ $output =~ /New table `test`.`([_]+pt1717_new)` not found, restart operation fro
 
 ($output, $exit) = full_output(
    sub { pt_online_schema_change::main(@args, "$source_dsn,D=test,t=pt1717",
-      '--alter', 'engine=innodb', '--execute', "--resume=${job_id}",
+      '--alter', 'ADD COLUMN foo varchar(32)', '--execute', "--resume=${job_id}",
       '--chunk-size=4',
       '--chunk-index=f2'
   ) }

239
t/pt-online-schema-change/pt-2355.t
Normal file
239
t/pt-online-schema-change/pt-2355.t
Normal file
@@ -0,0 +1,239 @@
|
||||
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use threads;

use English qw(-no_match_vars);
use Test::More;

use Data::Dumper;
use PerconaTest;
use Sandbox;
use SqlModes;
use File::Temp qw/ tempdir tempfile /;

our $delay = 10;
my $max_lag = $delay / 2;
my $output;
my $exit;

my $tmp_file = File::Temp->new();
my $tmp_file_name = $tmp_file->filename;
unlink $tmp_file_name;

require "$trunk/bin/pt-online-schema-change";

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
if ($sb->is_cluster_mode) {
   plan skip_all => 'Not for PXC';
}

my $source_dbh = $sb->get_dbh_for('source');
my $replica_dbh1 = $sb->get_dbh_for('replica1');
my $replica_dbh2 = $sb->get_dbh_for('replica2');
my $source_dsn = 'h=127.0.0.1,P=12345,u=msandbox,p=msandbox';
my $replica_dsn1 = 'h=127.0.0.1,P=12346,u=msandbox,p=msandbox';
my $replica_dsn2 = 'h=127.0.0.1,P=12347,u=msandbox,p=msandbox';
my $sample = "t/pt-online-schema-change/samples";
my $plugin = "$trunk/$sample/plugins";

# We need sync_relay_log=1 to keep changes after replica restart
my $cnf = '/tmp/12347/my.sandbox.cnf';
diag(`cp $cnf $cnf.bak`);
diag(`echo "[mysqld]" > /tmp/12347/my.sandbox.2.cnf`);
diag(`echo "sync_relay_log=1" >> /tmp/12347/my.sandbox.2.cnf`);
diag(`echo "sync_relay_log_info=1" >> /tmp/12347/my.sandbox.2.cnf`);
diag(`echo "relay_log_recovery=1" >> /tmp/12347/my.sandbox.2.cnf`);
diag(`echo "!include /tmp/12347/my.sandbox.2.cnf" >> $cnf`);
diag(`/tmp/12347/stop >/dev/null`);
sleep 1;
diag(`/tmp/12347/start >/dev/null`);

sub reset_query_cache {
   my @dbhs = @_;
   return if ($sandbox_version ge '8.0');
   foreach my $dbh (@dbhs) {
      $dbh->do('RESET QUERY CACHE');
   }
}

sub run_broken_job {
   my ($args) = @_;
   my ($fh, $filename) = tempfile();
   my $pid = fork();

   if (!$pid) {
      open(STDERR, '>', $filename);
      open(STDOUT, '>', $filename);
      exec("$trunk/bin/pt-online-schema-change $args");
   }

   sleep($max_lag + $max_lag/2);
   # stop replica 12347
   diag(`/tmp/12347/stop >/dev/null`);
   sleep 1;

   waitpid($pid, 0);
   my $output = do {
      local $/ = undef;
      <$fh>;
   };

   return $output;
}

sub set_delay {
   $sb->wait_for_replicas();

   diag("Setting replica delay to $delay seconds");
   diag(`/tmp/12345/use -N test -e "DROP TABLE IF EXISTS pt1717_back"`);

   $replica_dbh1->do("STOP ${replica_name}");
   $replica_dbh1->do("CHANGE ${source_change} TO ${source_name}_DELAY=$delay");
   $replica_dbh1->do("START ${replica_name}");

   # Run a full table scan query to ensure the replica is behind the source
   # There is no query cache in MySQL 8.0+
   reset_query_cache($source_dbh, $replica_dbh1);
   # Update one row so replica is delayed
   $source_dbh->do('UPDATE `test`.`pt1717` SET f2 = f2 + 1 LIMIT 1');
   $source_dbh->do('UPDATE `test`.`pt1717` SET f2 = f2 + 1 WHERE f1 = ""');

   # Creating copy of table pt1717, so we can compare data later
   diag(`/tmp/12345/use -N test -e "CREATE TABLE pt1717_back like pt1717"`);
   diag(`/tmp/12345/use -N test -e "INSERT INTO pt1717_back SELECT * FROM pt1717"`);
}

# 1) Set the replica delay to 0 just in case we are re-running the tests without restarting the sandbox.
# 2) Load sample data.
# 3) Set the replica delay to 10 seconds to be able to see the 'waiting' message.
diag("Setting replica delay to 0 seconds");
|
||||
$replica_dbh1->do("STOP ${replica_name}");
|
||||
$source_dbh->do("RESET ${source_reset}");
|
||||
$replica_dbh1->do("RESET ${replica_name}");
|
||||
$replica_dbh1->do("START ${replica_name}");
|
||||
|
||||
diag('Loading test data');
|
||||
$sb->load_file('source', "t/pt-online-schema-change/samples/pt-1717.sql");
|
||||
|
||||
# Should be greater than chunk-size and big enough, so pt-osc will wait for delay
|
||||
my $num_rows = 5000;
|
||||
my $chunk_size = 10;
|
||||
diag("Loading $num_rows into the table. This might take some time.");
|
||||
diag(`util/mysql_random_data_load --host=127.0.0.1 --port=12345 --user=msandbox --password=msandbox test pt1717 $num_rows`);
|
||||
|
||||
diag("Starting tests...");
|
||||
|
||||
set_delay();
|
||||
|
||||
# We need to sleep, otherwise pt-osc can finish before replica is delayed
|
||||
sleep($max_lag);
|
||||
|
||||
my $args = "$source_dsn,D=test,t=pt1717 --execute --chunk-size ${chunk_size} --max-lag $max_lag --alter 'ADD INDEX idx1(f1)' --pid $tmp_file_name --progress time,5 --no-drop-new-table --no-drop-triggers --history";
|
||||
|
||||
$output = run_broken_job($args);
|
||||
|
||||
like(
|
||||
$output,
|
||||
qr/`test`.`pt1717` was not altered/s,
|
||||
"pt-osc stopped with error as expected",
|
||||
) or diag($output);
|
||||
|
||||
diag(`/tmp/12347/start >/dev/null`);
|
||||
$sb->wait_for_replicas();
|
||||
|
||||
$output = `/tmp/12345/use -N -e "select job_id, upper_boundary from percona.pt_osc_history"`;
|
||||
my ($job_id, $upper_boundary) = split(/\s+/, $output);
|
||||
|
||||
my $copied_rows = `/tmp/12345/use -N -e "select count(*) from test._pt1717_new"`;
|
||||
chomp($copied_rows);
|
||||
|
||||
ok(
|
||||
$copied_rows eq $upper_boundary,
|
||||
'Upper chunk boundary stored correctly'
|
||||
) or diag("Copied_rows: ${copied_rows}, upper boundary: ${upper_boundary}");;
|
||||

($output, $exit) = full_output(
   sub { pt_online_schema_change::main("$source_dsn,D=test,t=pt1717",
      "--execute", "--chunk-size=${chunk_size}", "--max-lag=${max_lag}",
      "--alter=ADD INDEX idx1(f1)",
      "--resume=${job_id}",
   ) }
);

is(
   $exit,
   0,
   'pt-osc works correctly with --resume'
) or diag($exit);

like(
   $output,
   qr/Successfully altered/,
   'Success message printed'
) or diag($output);

# Corrupting job record, so we can test error message
diag(`/tmp/12345/use -N -e "update percona.pt_osc_history set new_table_name=NULL where job_id=${job_id}"`);

($output, $exit) = full_output(
   sub { pt_online_schema_change::main("$source_dsn,D=test,t=pt1717",
      "--execute", "--chunk-size=${chunk_size}", "--max-lag=${max_lag}",
      "--alter=ADD INDEX idx1(f1)",
      "--resume=${job_id}",
   ) }
);

is(
   $exit,
   17,
   'pt-osc correctly fails with empty boundaries'
) or diag($exit);

like(
   $output,
   qr/Option --resume refers job \d+ with empty boundaries. Exiting./,
   'Correct error message printed'
) or diag($output);

unlike(
   $output,
   qr/Option --resume refers non-existing job ID: \d+. Exiting./,
   'Misleading error message not printed'
) or diag($output);

# #############################################################################
# Done.
# #############################################################################
diag("Cleaning");
$replica_dbh2 = $sb->get_dbh_for('replica2');
diag("Setting replica delay to 0 seconds");
$replica_dbh1->do("STOP ${replica_name}");
$replica_dbh2->do("STOP ${replica_name}");
$source_dbh->do("RESET ${source_reset}");
$replica_dbh1->do("RESET ${source_reset}");
$replica_dbh1->do("RESET ${replica_name}");
$replica_dbh2->do("RESET ${replica_name}");
$replica_dbh1->do("START ${replica_name}");
$replica_dbh2->do("START ${replica_name}");

diag(`mv $cnf.bak $cnf`);

diag(`/tmp/12347/stop >/dev/null`);
diag(`/tmp/12347/start >/dev/null`);

diag("Dropping test database");
$source_dbh->do("DROP DATABASE IF EXISTS test");
$sb->wait_for_replicas();

$sb->wipe_clean($source_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;
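
For reference, a hedged sketch (not part of this patch) of the resume flow pt-2355.t exercises, written with plain DBI instead of the sandbox helpers. It assumes DBD::mysql is available, the sandbox credentials used above, and pt-online-schema-change on PATH; the percona.pt_osc_history columns are the ones the test itself queries.

#!/usr/bin/env perl
use strict;
use warnings;
use DBI;

# Connect to the source server (sandbox credentials from the test above).
my $dbh = DBI->connect(
   'DBI:mysql:host=127.0.0.1;port=12345', 'msandbox', 'msandbox',
   { RaiseError => 1 },
);

# Find the most recent job recorded by --history.
my ($job_id, $upper_boundary) = $dbh->selectrow_array(
   'SELECT job_id, upper_boundary FROM percona.pt_osc_history
    ORDER BY job_id DESC LIMIT 1'
);

# Resume it; the --alter clause must be the one the job was started with.
system('pt-online-schema-change',
   'h=127.0.0.1,P=12345,u=msandbox,p=msandbox,D=test,t=pt1717',
   '--execute', "--resume=$job_id",
   '--alter', 'ADD INDEX idx1(f1)',
);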
71
t/pt-online-schema-change/pt-2407.t
Normal file
@@ -0,0 +1,71 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-online-schema-change";
require VersionParser;

use Data::Dumper;

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $source_dbh = $sb->get_dbh_for('source');
my $replica_dbh = $sb->get_dbh_for('replica1');

if ( !$source_dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}
elsif ( !$replica_dbh ) {
   plan skip_all => 'Cannot connect to sandbox replica';
}

my @args = qw(--set-vars innodb_lock_wait_timeout=3);
my $output = "";
my $dsn = "h=127.1,P=12345,u=msandbox,p=msandbox";
my $exit = 0;
my $sample = "t/pt-online-schema-change/samples";

$sb->load_file('source', "$sample/pt-2407.sql");

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$dsn,D=pt_2407,t=t1",
      '--alter', 'alter table t1 ADD COLUMN payout_group_id VARCHAR(255) DEFAULT NULL, ALGORITHM=INSTANT;', '--execute') }
);

is(
   $exit,
   11,
   'Return code non-zero for failed operation'
) or diag($exit);

like(
   $output,
   qr/You have an error in your SQL syntax/,
   'Job failed due to SQL syntax error'
) or diag($output);

like(
   $output,
   qr/Error altering new table/,
   'Error altering new table message printed'
) or diag($output);

# #############################################################################
# Done.
# #############################################################################

$sb->wipe_clean($source_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
#
done_testing;
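
The failing call above deliberately passes a complete statement, 'alter table t1 ... ;', as the --alter value; pt-online-schema-change expects only the clause that follows 'ALTER TABLE <tbl>', with no trailing semicolon, which is why the run dies with a syntax error. A sketch (not part of the patch) of what the corrected invocation would look like, reusing the test's @args and $dsn:

# Sketch only: the same change expressed as a bare alter clause that
# pt-osc can append to its own ALTER TABLE statement.
my ($ok_output, $ok_exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$dsn,D=pt_2407,t=t1",
      '--alter', 'ADD COLUMN payout_group_id VARCHAR(255) DEFAULT NULL, ALGORITHM=INSTANT',
      '--execute') }
);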
120
t/pt-online-schema-change/pt-2422.t
Normal file
@@ -0,0 +1,120 @@
#!/usr/bin/env perl

BEGIN {
   die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
      unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
   unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};

use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;

use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-online-schema-change";
require VersionParser;

use Data::Dumper;

my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $source_dbh = $sb->get_dbh_for('source');
my $replica_dbh = $sb->get_dbh_for('replica1');

if ( !$source_dbh ) {
   plan skip_all => 'Cannot connect to sandbox source';
}
elsif ( !$replica_dbh ) {
   plan skip_all => 'Cannot connect to sandbox replica';
}

my @args = qw(--set-vars innodb_lock_wait_timeout=3);
my $output = "";
my $dsn = "h=127.1,P=12345,u=msandbox,p=msandbox";
my $exit = 0;
my $sample = "t/pt-online-schema-change/samples";

$sb->load_file('source', "$sample/basic_no_fks_innodb.sql");
$source_dbh->do('CREATE TABLE pt_osc.pt_2422 LIKE pt_osc.t');
$source_dbh->do('INSERT INTO pt_osc.pt_2422 SELECT * FROM pt_osc.t');

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$dsn,D=pt_osc,t=t",
      '--alter', 'engine=innodb', '--execute', '--history') }
);

is(
   $exit,
   0,
   'basic test with option --history finished OK'
) or diag($output);

like(
   $output,
   qr/Job \d started/,
   'Job id printed at the beginning of the tool output'
);

like(
   $output,
   qr/Job \d finished successfully/,
   'Job id printed for successful copy'
);

$output = `/tmp/12345/use -N -e "SELECT new_table_name FROM percona.pt_osc_history WHERE job_id=1"`;

like(
   $output,
   qr/_t_new/,
   'Correct new table name inserted'
) or diag($output);

($output, $exit) = full_output(
   sub { pt_online_schema_change::main(@args, "$dsn,D=pt_osc,t=pt_2422",
      '--alter', 'engine=innodb', '--execute', '--history') }
);

is(
   $exit,
   0,
   'basic test with second table and option --history finished OK'
) or diag($output);

like(
   $output,
   qr/Job \d started/,
   'Job id printed at the beginning of the tool output for the second table'
);

like(
   $output,
   qr/Job \d finished successfully/,
   'Job id printed for successful copy of the second table'
);

$output = `/tmp/12345/use -N -e "SELECT new_table_name FROM percona.pt_osc_history WHERE job_id=1"`;

like(
   $output,
   qr/_t_new/,
   'New table name for previous job was not updated'
) or diag($output);

$output = `/tmp/12345/use -N -e "SELECT new_table_name FROM percona.pt_osc_history WHERE job_id=2"`;

like(
   $output,
   qr/_pt_2422_new/,
   'Correct new table name inserted for the second table'
) or diag($output);

# #############################################################################
# Done.
# #############################################################################

$sb->wipe_clean($source_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
#
done_testing;
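
To see what --history leaves behind after two runs like the ones above, the history table can be queried directly. A small DBI sketch (not part of the patch), assuming the same sandbox credentials:

use strict;
use warnings;
use DBI;

my $dbh = DBI->connect(
   'DBI:mysql:host=127.0.0.1;port=12345', 'msandbox', 'msandbox',
   { RaiseError => 1 },
);

# Each --history run appends a row keyed by job_id; the test above asserts
# that every row keeps its own new_table_name.
my $rows = $dbh->selectall_arrayref(
   'SELECT job_id, new_table_name FROM percona.pt_osc_history ORDER BY job_id'
);
printf "job %s -> %s\n", @$_ for @$rows;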
12
t/pt-online-schema-change/samples/pt-2407.sql
Normal file
@@ -0,0 +1,12 @@
CREATE DATABASE pt_2407;

USE pt_2407;

CREATE TABLE t1 (
  c1 int NOT NULL,
  c2 varchar(100) NOT NULL,
  PRIMARY KEY (c1),
  KEY idx (c2)
) ENGINE=InnoDB;

INSERT INTO t1 VALUES(1,1),(2,2),(3,3),(4,4),(5,5);