Compare commits

..

5 Commits

Author SHA1 Message Date
Sveta Smirnova
bf4a095c69 Merge branch '3.x' into release-3.5.1 2023-01-23 17:44:27 +03:00
EvgeniyPatlan
ddd0fcfaf9 PT-7 Fix syntax (#573) 2023-01-20 18:50:53 +03:00
Alina Derkach
ce14fbceba PT-2162 Release Notes 3.5.1 (#571)
modified:   Makefile.PL
	modified:   config/sphinx-build/conf.py
	modified:   docs/release_notes.rst
	new file:   docs/rn.3-5-1.txt

Co-authored-by: Alina Derkach <alina.derkach@percona.com>
2023-01-17 15:49:05 +03:00
svetasmirnova
a0930a4e64 RM-1153 - Percona Toolkit 3.5.1
Updated Changelog
Updated release date in docs/percona-toolkit.pod
2023-01-17 12:31:43 +03:00
svetasmirnova
4a33e5e52b RM-1153 - Percona Toolkit 3.5.1
Updated version for Perl files to 3.5.1
Updated Perl module Percona::Toolkit to version 3.5.1 and updated all scripts using this module
Manually updated Go programs
Moved ONLY_UPDATE_VERSION after definition of DATE and SERIES variables, so the script does not fail because of undefined variables
2023-01-17 12:20:25 +03:00
49 changed files with 225 additions and 795 deletions

View File

@@ -1,5 +0,0 @@
- [ ] The contributed code is licensed under GPL v2.0
- [ ] Contributor Licence Agreement (CLA) is signed
- [ ] util/update-modules has been run
- [ ] Documentation updated
- [ ] Test suite update

View File

@@ -1958,7 +1958,7 @@ sub parse {
my $engine = $self->get_engine($ddl);
my @defs = $ddl =~ m/(?:(?<=,\n)|(?<=\(\n))(\s+`(?:.|\n)+?`.+?),?\n/g;
my @defs = $ddl =~ m/^(\s+`.*?),?$/gm;
my @cols = map { $_ =~ m/`([^`]+)`/ } @defs;
PTDEBUG && _d('Table cols:', join(', ', map { "`$_`" } @cols));
@@ -2139,7 +2139,7 @@ sub get_keys {
my $clustered_key = undef;
KEY:
foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY \(?`[\s\S]*?`\),?)$/gm ) {
foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY .*)$/gm ) {
next KEY if $key =~ m/FOREIGN/;
@@ -2150,7 +2150,7 @@ sub get_keys {
$key =~ s/USING HASH/USING BTREE/;
}
my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \(([\s\S]+?)\)/;
my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \((.+)\)/;
my ( $special ) = $key =~ m/(FULLTEXT|SPATIAL)/;
$type = $type || $special || 'BTREE';
my ($name) = $key =~ m/(PRIMARY|`[^`]*`)/;
@@ -6097,9 +6097,6 @@ $Data::Dumper::Quotekeys = 0;
use Percona::Toolkit;
use constant PTDEBUG => $ENV{PTDEBUG} || 0;
# We need SQL types in order to properly handle BIT columns in WHERE clause
use DBI qw(:sql_types);
# Global variables; as few as possible.
my $oktorun = 1;
my $txn_cnt = 0;
@@ -6603,9 +6600,7 @@ sub main {
. ($o->get('replace') ? ' REPLACE' : '')
. ($o->get('ignore') ? ' IGNORE' : '')
. " INTO TABLE $dst->{db_tbl}"
. ($got_charset ? "CHARACTER SET $got_charset" :
( $src->{info}->{charset} && $src->{info}->{charset} =~ /utf/ ?
"CHARACTER SET utf8mb4" : "" ))
. ($got_charset ? "CHARACTER SET $got_charset" : "")
. "("
. join(",", map { $q->quote($_) } @{$ins_stmt->{cols}} )
. ")";
@@ -6782,10 +6777,6 @@ sub main {
require File::Temp;
$bulkins_file = File::Temp->new( SUFFIX => 'pt-archiver' )
or die "Cannot open temp file: $OS_ERROR\n";
if ( !$charset && $src->{info}->{charset}
&& $src->{info}->{charset} =~ /utf/ ) {
binmode($bulkins_file, ':utf8')
}
binmode($bulkins_file, $charset)
or die "Cannot set $charset as an encoding for the bulk-insert "
. "file: $OS_ERROR";
@@ -6869,15 +6860,7 @@ sub main {
}
if ( !$o->get('no-delete') ) {
my $success = do_with_retries($o, 'deleting', sub {
# We have to make exception for BIT column, see PT-2114
for my $i (0 .. $#del_slice) {
if ($src->{info}->{type_for}->{$del_stmt->{cols}[$del_slice[$i]]} eq 'bit') {
$del_row->bind_param($i + 1, oct('0b' . unpack('B*', @{$row}[$del_slice[$i]])), SQL_INTEGER);
} else {
$del_row->bind_param($i + 1, @{$row}[$del_slice[$i]]);
}
}
$del_row->execute();
$del_row->execute(@{$row}[@del_slice]);
PTDEBUG && _d('Deleted', $del_row->rows, 'rows');
$statistics{DELETE} += $del_row->rows;
});
@@ -6980,17 +6963,10 @@ sub main {
}
if ( !$o->get('no-delete') ) {
my $success = do_with_retries($o, 'bulk_deleting', sub {
# We have to make exception for BIT column, see PT-2114
for my $i (0 .. $#bulkdel_slice) {
if ($src->{info}->{type_for}->{$del_stmt->{cols}[$bulkdel_slice[$i]]} eq 'bit') {
$del_row->bind_param($i + 1, oct('0b' . unpack('B*', @{$first_row}[$bulkdel_slice[$i]])), SQL_INTEGER);
$del_row->bind_param($i + $#bulkdel_slice + 2, oct('0b' . unpack('B*', @{$lastrow}[$bulkdel_slice[$i]])), SQL_INTEGER);
} else {
$del_row->bind_param($i + 1, @{$first_row}[$bulkdel_slice[$i]]);
$del_row->bind_param($i + $#bulkdel_slice + 2, @{$lastrow}[$bulkdel_slice[$i]]);
}
}
$del_row->execute();
$del_row->execute(
@{$first_row}[@bulkdel_slice],
@{$lastrow}[@bulkdel_slice],
);
PTDEBUG && _d('Bulk deleted', $del_row->rows, 'rows');
$statistics{DELETE} += $del_row->rows;
});
@@ -7883,7 +7859,7 @@ Pause archiving if the slave given by L<"--check-slave-lag"> lags.
This option causes pt-archiver to look at the slave every time it's about
to fetch another row. If the slave's lag is greater than the option's value,
or if the slave isn't running (so its lag is NULL), pt-archiver sleeps
or if the slave isn't running (so its lag is NULL), pt-table-checksum sleeps
for L<"--check-interval"> seconds and then looks at the lag again. It repeats
until the slave is caught up, then proceeds to fetch and archive the row.

View File

@@ -1185,12 +1185,7 @@ collect_encrypted_tables() {
}
collect_encrypted_tablespaces() {
local version="$1"
if [ "$version" '<' "8.0" ]; then
$CMD_MYSQL $EXT_ARGV --table -ss -e "SELECT SPACE, NAME, SPACE_TYPE from INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES where FLAG&8192 = 8192;"
else
$CMD_MYSQL $EXT_ARGV --table -ss -e "SELECT SPACE, NAME, SPACE_TYPE from INFORMATION_SCHEMA.INNODB_TABLESPACES where FLAG&8192 = 8192;"
fi
$CMD_MYSQL $EXT_ARGV --table -ss -e "SELECT SPACE, NAME, SPACE_TYPE from INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES where FLAG&8192 = 8192;"
}
@@ -2526,9 +2521,8 @@ report_mysql_summary () {
local encrypted_tables=""
local encrypted_tablespaces=""
if [ "${OPT_LIST_ENCRYPTED_TABLES}" = 'yes' ]; then
local mysql_version="$(get_var version "$dir/mysql-variables")"
encrypted_tables="$(collect_encrypted_tables)"
encrypted_tablespaces="$(collect_encrypted_tablespaces ${mysql_version})"
encrypted_tablespaces="$(collect_encrypted_tablespaces)"
fi
format_keyring_plugins "$keyring_plugins" "$encrypted_tables"

View File

@@ -12759,9 +12759,9 @@ exist.
=item --null-to-not-null
Allows MODIFYing a column that allows NULL values to one that doesn't allow
them. The existing rows which contain NULL values will be converted to the default value
based on datatype, e.g. 0 for number datatypes, '' for string datatypes.
New rows will use the user defined default value if specified for the column.
them. The rows which contain NULL values will be converted to the defined
default value. If no explicit DEFAULT value is given MySQL will assign a default
value based on datatype, e.g. 0 for number datatypes, '' for string datatypes.
=item --only-same-schema-fks

View File

@@ -1276,16 +1276,11 @@ slave_status() {
local outfile=$1
local mysql_version=$2
if [ "${mysql_version}" '<' "8.1" ]; then
local sql="SHOW SLAVE STATUS\G"
if [ "${mysql_version}" '<' "5.7" ]; then
local sql="SHOW SLAVE STATUS\G"
echo -e "\n$sql\n" >> $outfile
$CMD_MYSQL $EXT_ARGV -e "$sql" >> $outfile
else
local sql="SHOW REPLICA STATUS\G"
fi
echo -e "\n$sql\n" >> $outfile
$CMD_MYSQL $EXT_ARGV -e "$sql" >> $outfile
if [ "${mysql_version}" '>' "5.6" ]; then
local sql="SELECT * FROM performance_schema.replication_connection_configuration JOIN performance_schema.replication_applier_configuration USING(channel_name)\G"
echo -e "\n$sql\n" >> $outfile
$CMD_MYSQL $EXT_ARGV -e "$sql" >> $outfile

View File

@@ -4516,7 +4516,7 @@ sub parse {
my $engine = $self->get_engine($ddl);
my @defs = $ddl =~ m/(?:(?<=,\n)|(?<=\(\n))(\s+`(?:.|\n)+?`.+?),?\n/g;
my @defs = $ddl =~ m/^(\s+`.*?),?$/gm;
my @cols = map { $_ =~ m/`([^`]+)`/ } @defs;
PTDEBUG && _d('Table cols:', join(', ', map { "`$_`" } @cols));
@@ -4697,7 +4697,7 @@ sub get_keys {
my $clustered_key = undef;
KEY:
foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY \(?`[\s\S]*?`\),?)$/gm ) {
foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY .*)$/gm ) {
next KEY if $key =~ m/FOREIGN/;
@@ -4708,7 +4708,7 @@ sub get_keys {
$key =~ s/USING HASH/USING BTREE/;
}
my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \(([\s\S]+?)\)/;
my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \((.+)\)/;
my ( $special ) = $key =~ m/(FULLTEXT|SPATIAL)/;
$type = $type || $special || 'BTREE';
my ($name) = $key =~ m/(PRIMARY|`[^`]*`)/;

Binary file not shown.

Before

Width:  |  Height:  |  Size: 26 KiB

After

Width:  |  Height:  |  Size: 17 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.4 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.4 KiB

View File

@@ -1,3 +0,0 @@
<svg width="162" height="140" viewBox="0 0 162 140" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M131.446 87.9231C148.661 76.6669 154.283 53.7221 143.868 35.6906C138.642 26.6448 130.209 20.1698 120.118 17.4549C110.76 14.9442 101.005 15.9773 92.4039 20.314L80.6672 0L56.3049 42.1896L0 139.711H161.346L131.446 87.9231ZM117.367 27.786C124.707 29.7442 130.809 34.4533 134.606 41.0123C142.078 53.9383 138.197 70.3361 126.076 78.625L97.7737 29.6C103.876 26.6688 110.748 26.0321 117.367 27.786ZM80.6792 21.4071L142.798 128.995H106.459L62.4916 52.9052L80.6672 21.4192L80.6792 21.4071ZM18.5601 129.007L56.3169 63.6328L94.0737 129.007H18.5601Z" fill="#FF7E1A"/>
</svg>

Before

Width:  |  Height:  |  Size: 671 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 54 KiB

View File

@@ -1,9 +1,12 @@
<svg width="520" height="451" viewBox="0 0 520 451" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M423.631 283.376C479.103 247.116 497.253 173.155 463.684 115.024C446.863 85.8662 419.673 64.9845 387.145 56.265C357.008 48.1741 325.531 51.4826 297.826 65.4793L260 0L181.483 135.988L0 450.285H520L423.631 283.376ZM378.25 89.5766C401.894 95.8741 421.57 111.056 433.825 132.195C457.912 173.866 445.399 226.719 406.336 253.414L315.1 95.4103C334.755 85.9589 356.925 83.8976 378.25 89.5766ZM260 69.0145L460.2 415.767H343.063L201.406 170.496L259.99 69.0248L260 69.0145ZM59.8002 415.767L181.472 205.075L303.144 415.767H59.8002Z" fill="url(#paint0_linear_552_15)"/>
<defs>
<linearGradient id="paint0_linear_552_15" x1="58.6494" y1="460.228" x2="444.575" y2="74.3019" gradientUnits="userSpaceOnUse">
<stop stop-color="#FC3519"/>
<stop offset="1" stop-color="#F0D136"/>
</linearGradient>
</defs>
<svg width="74" height="74" viewBox="0 0 74 74" fill="none" xmlns="http://www.w3.org/2000/svg">
<circle cx="37" cy="37" r="37" fill="url(#paint0_linear)"/>
<path d="M37.5003 58C49.3744 58 59 48.3743 59 36.5C59 24.6257 49.3744 15 37.5003 15C25.6261 15 16 24.6257 16 36.5C16 36.6671 16.0019 36.8338 16.0057 37H16V67.4672C18.744 69.3622 21.7566 70.896 24.969 72H25V53.9948C28.5225 56.5161 32.8381 58 37.5003 58Z" fill="white"/>
<path d="M50 36.5C50 43.4036 44.4037 49 37.5001 49C30.5966 49 25 43.4036 25 36.5C25 29.5964 30.5966 24 37.5001 24C44.4037 24 50 29.5964 50 36.5Z" fill="#FCB42F"/>
<defs>
<linearGradient id="paint0_linear" x1="37" y1="0" x2="37" y2="74" gradientUnits="userSpaceOnUse">
<stop stop-color="#FBB32F"/>
<stop offset="0.504739" stop-color="#C11511"/>
<stop offset="1" stop-color="#C11411"/>
</linearGradient>
</defs>
</svg>

Before

Width:  |  Height:  |  Size: 901 B

After

Width:  |  Height:  |  Size: 878 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 894 B

View File

@@ -12,13 +12,13 @@ html_theme_options = {
'base_url': 'http://bashtage.github.io/sphinx-material/',
'repo_url': 'https://github.com/percona/percona-toolkit',
'repo_name': 'percona/percona-toolkit',
'color_accent': 'cyan',
'color_primary': 'blue-grey',
'color_accent': 'grey',
'color_primary': 'orange',
'globaltoc_collapse': True,
'version_dropdown': True
}
html_logo = '../_static/percona-logo.svg'
html_favicon = '../_static/percona-favicon.ico'
html_favicon = '../_static/percona_favicon.ico'
pygments_style = 'emacs'
gitstamp_fmt = "%b %d, %Y"
# Specify the text pattern that won't be copied with the code block contents

View File

@@ -203,7 +203,7 @@ htmlhelp_basename = 'PerconaToolkitdoc'
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PerconaToolkit.tex', u'Percona Toolkit Documentation',
u'2023, Percona LLC and/or its affiliates', 'manual'),
u'2022, Percona LLC and/or its affiliates', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -237,5 +237,5 @@ latex_toplevel_sectioning = 'part'
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'perconatoolkit', u'Percona Toolkit Documentation',
[u'2023, Percona LLC and/or its affiliates'], 1)
[u'2022, Percona LLC and/or its affiliates'], 1)
]

Binary file not shown.

After

Width:  |  Height:  |  Size: 54 KiB

View File

@@ -122,7 +122,7 @@ Usage
``pt-k8s-debug-collector <flags>``
Supported Flags
Supported Flags:
================
``--resource``
@@ -157,18 +157,12 @@ Path to kubeconfig. Default configuration be used if none specified
Port to use when collecting database-specific summaries. By default, 3306 will be used for PXC and MySQL, 27017 for MongoDB, and 5432 for PostgreSQL
``--version``
Print version info
Requirements
============
- Installed, configured, and available in PATH ``kubectl``
- Installed, configured, and available in PATH ``pt-mysql-summary`` for PXC and MySQL
- Installed, configured, and available in PATH ``mysql`` for PXC and MySQL
- Installed, configured, and available in PATH ``pt-mongodb-summary`` for MongoDB
- Installed, configured, and available in PATH ``psql`` for PostgreSQL
Known Issues
============
@@ -184,4 +178,4 @@ On Kubernetes 1.21 - 1.24 warning is printed:
metadata:
resourceVersion: ""
This warning is harmless and does not affect data collection. We will remove podsecuritypolicies once everyone upgrade to Kubernetes 1.25 or newer. Before that we advise to ignore this warning.
This warning is harmless and does not affect data collection. We will remove podsecuritypolicies once everyone upgrade to Kubernetes 1.25 or newer. Before that we advise to ignore this warning.

View File

@@ -2,7 +2,6 @@ Percona Toolkit
***************
v3.5.1 released 2023-01-23
==============================
New Features
------------------------------------------------------------

View File

@@ -149,7 +149,7 @@ sub parse {
my $engine = $self->get_engine($ddl);
my @defs = $ddl =~ m/(?:(?<=,\n)|(?<=\(\n))(\s+`(?:.|\n)+?`.+?),?\n/g;
my @defs = $ddl =~ m/^(\s+`.*?),?$/gm;
my @cols = map { $_ =~ m/`([^`]+)`/ } @defs;
PTDEBUG && _d('Table cols:', join(', ', map { "`$_`" } @cols));
@@ -389,8 +389,7 @@ sub get_keys {
my $clustered_key = undef;
KEY:
#foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY .*)$/gm ) {
foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY \(?`[\s\S]*?`\),?)$/gm ) {
foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY .*)$/gm ) {
# If you want foreign keys, use get_fks() below.
next KEY if $key =~ m/FOREIGN/;
@@ -408,7 +407,7 @@ sub get_keys {
}
# Determine index type
my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \(([\s\S]+?)\)/;
my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \((.+)\)/;
my ( $special ) = $key =~ m/(FULLTEXT|SPATIAL)/;
$type = $type || $special || 'BTREE';
my ($name) = $key =~ m/(PRIMARY|`[^`]*`)/;

View File

@@ -580,16 +580,11 @@ slave_status() {
local outfile=$1
local mysql_version=$2
if [ "${mysql_version}" '<' "8.1" ]; then
local sql="SHOW SLAVE STATUS\G"
if [ "${mysql_version}" '<' "5.7" ]; then
local sql="SHOW SLAVE STATUS\G"
echo -e "\n$sql\n" >> $outfile
$CMD_MYSQL $EXT_ARGV -e "$sql" >> $outfile
else
local sql="SHOW REPLICA STATUS\G"
fi
echo -e "\n$sql\n" >> $outfile
$CMD_MYSQL $EXT_ARGV -e "$sql" >> $outfile
if [ "${mysql_version}" '>' "5.6" ]; then
local sql="SELECT * FROM performance_schema.replication_connection_configuration JOIN performance_schema.replication_applier_configuration USING(channel_name)\G"
echo -e "\n$sql\n" >> $outfile
$CMD_MYSQL $EXT_ARGV -e "$sql" >> $outfile

View File

@@ -116,14 +116,9 @@ collect_encrypted_tables() {
}
collect_encrypted_tablespaces() {
local version="$1"
# I_S.INNODB_[SYS_]TABLESPACES has a "flag" field. Encrypted tablespace has bit 14 set. You can check it with "flag & 8192".
# I_S.INNODB_SYS_TABLESPACES has a "flag" field. Encrypted tablespace has bit 14 set. You can check it with "flag & 8192".
# And seems like MySQL is capable of bitwise operations. https://dev.mysql.com/doc/refman/5.7/en/bit-functions.html
if [ "$version" '<' "8.0" ]; then
$CMD_MYSQL $EXT_ARGV --table -ss -e "SELECT SPACE, NAME, SPACE_TYPE from INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES where FLAG&8192 = 8192;"
else
$CMD_MYSQL $EXT_ARGV --table -ss -e "SELECT SPACE, NAME, SPACE_TYPE from INFORMATION_SCHEMA.INNODB_TABLESPACES where FLAG&8192 = 8192;"
fi
$CMD_MYSQL $EXT_ARGV --table -ss -e "SELECT SPACE, NAME, SPACE_TYPE from INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES where FLAG&8192 = 8192;"
}
@@ -1585,9 +1580,8 @@ report_mysql_summary () {
local encrypted_tables=""
local encrypted_tablespaces=""
if [ "${OPT_LIST_ENCRYPTED_TABLES}" = 'yes' ]; then
local mysql_version="$(get_var version "$dir/mysql-variables")"
encrypted_tables="$(collect_encrypted_tables)"
encrypted_tablespaces="$(collect_encrypted_tablespaces ${mysql_version})"
encrypted_tablespaces="$(collect_encrypted_tablespaces)"
fi
format_keyring_plugins "$keyring_plugins" "$encrypted_tables"

View File

@@ -57,26 +57,21 @@ plugins_dir_cmd=""
plugins_cmd=""
keyring_cmd=""
if [ -z "${DISABLE_KEYRING}" ]; then
if [ -e "${BASEDIR}/lib/mysql/plugin/keyring_file.so" ]; then
encryption_plugins="${BASEDIR}/lib/mysql/plugin/keyring_file.so"
plugins_dir_cmd="--plugin-dir=${BASEDIR}/lib/mysql/plugin/"
elif [ -e "${BASEDIR}/lib/plugin/keyring_file.so" ]; then
encryption_plugins="keyring_file.so"
plugins_dir_cmd="--plugin-dir=${BASEDIR}/lib/plugin/"
fi
if [ -e "${BASEDIR}/lib/mysql/plugin/keyring_file.so" ]; then
encryption_plugins="${BASEDIR}/lib/mysql/plugin/keyring_file.so"
fi
if [ ! -z "$encryption_plugins" ]; then
plugins_cmd="--early-plugin-load=${encryption_plugins}"
keyring_cmd="--loose-keyring_file_data=/tmp/PORT/data/keyring"
fi
if [ ! -z "$encryption_plugins" ]; then
plugins_cmd="--early-plugin-load=${encryption_plugins}"
keyring_cmd="--keyring_file_data=/tmp/PORT/data/keyring"
plugins_dir_cmd="--plugin-dir=${BASEDIR}/lib/mysql/plugin/"
fi
init_file="/tmp/PORT/mysql-init"
if [ -e $init_file ]; then
$BASEDIR/bin/mysqld --defaults-file=/tmp/PORT/my.sandbox.cnf -u root --init-file $init_file $plugins_dir_cmd $plugins_cmd $keyring_cmd &
$BASEDIR/bin/mysqld --defaults-file=/tmp/PORT/my.sandbox.cnf -u root --init-file $init_file $plugins_cmd $plugins_dir_cmd $keyring_cmd &
else
$BASEDIR/bin/mysqld --defaults-file=/tmp/PORT/my.sandbox.cnf $plugins_dir_cmd $plugins_cmd $keyring_cmd > /dev/null 2>&1 &
$BASEDIR/bin/mysqld --defaults-file=/tmp/PORT/my.sandbox.cnf $plugins_cmd $plugins_dir_cmd $keyring_cmd > /dev/null 2>&1 &
fi
cd $PWD

View File

@@ -122,7 +122,7 @@ Usage
``pt-k8s-debug-collector <flags>``
Supported Flags
Supported Flags:
================
``--resource``
@@ -157,18 +157,12 @@ Path to kubeconfig. Default configuration be used if none specified
Port to use when collecting database-specific summaries. By default, 3306 will be used for PXC and MySQL, 27017 for MongoDB, and 5432 for PostgreSQL
``--version``
Print version info
Requirements
============
- Installed, configured, and available in PATH ``kubectl``
- Installed, configured, and available in PATH ``pt-mysql-summary`` for PXC and MySQL
- Installed, configured, and available in PATH ``mysql`` for PXC and MySQL
- Installed, configured, and available in PATH ``pt-mongodb-summary`` for MongoDB
- Installed, configured, and available in PATH ``psql`` for PostgreSQL
Known Issues
============
@@ -184,4 +178,4 @@ On Kubernetes 1.21 - 1.24 warning is printed:
metadata:
resourceVersion: ""
This warning is harmless and does not affect data collection. We will remove podsecuritypolicies once everyone upgrade to Kubernetes 1.25 or newer. Before that we advise to ignore this warning.
This warning is harmless and does not affect data collection. We will remove podsecuritypolicies once everyone upgrade to Kubernetes 1.25 or newer. Before that we advise to ignore this warning.

View File

@@ -2,50 +2,26 @@ package main
import (
"flag"
"fmt"
"log"
"os"
"github.com/percona/percona-toolkit/src/go/pt-k8s-debug-collector/dumper"
)
const (
TOOLNAME = "pt-k8s-debug-collector"
)
// We do not set anything here, these variables are defined by the Makefile
var (
Build string
GoVersion string
Version string
Commit string
)
func main() {
namespace := ""
resource := ""
clusterName := ""
kubeconfig := ""
forwardport := ""
version := false
flag.StringVar(&namespace, "namespace", "", "Namespace for collecting data. If empty data will be collected from all namespaces")
flag.StringVar(&resource, "resource", "none", "Collect data, specific to the resource. Supported values: pxc, psmdb, pg, ps, none")
flag.StringVar(&clusterName, "cluster", "", "Cluster name")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to kubeconfig")
flag.StringVar(&forwardport, "forwardport", "", "Port to use for port forwarding")
flag.BoolVar(&version, "version", false, "Print version")
flag.Parse()
if version {
fmt.Println(TOOLNAME)
fmt.Printf("Version %s\n", Version)
fmt.Printf("Build: %s using %s\n", Build, GoVersion)
fmt.Printf("Commit: %s\n", Commit)
return
}
if len(clusterName) > 0 {
resource += "/" + clusterName
}

View File

@@ -7,7 +7,6 @@ import (
"path"
"strings"
"testing"
"regexp"
"golang.org/x/exp/slices"
)
@@ -23,7 +22,7 @@ This test requires:
You can additionally set option FORWARDPORT if you want to use custom port when testing summaries.
pt-mysql-summary, mysql, psql, and pt-mongodb-summary must be in the PATH.
pt-mysql-summary and pt-mongodb-summary must be in the PATH.
Since running pt-k8s-debug-collector may take long time run go test with increase timeout:
go test -timeout 6000s
@@ -159,18 +158,3 @@ func TestResourceOption(t *testing.T) {
}
}
}
/*
Option --version
*/
func TestVersionOption(t *testing.T) {
out, err := exec.Command("../../../bin/pt-k8s-debug-collector", "--version").Output()
if err != nil {
t.Errorf("error executing pt-k8s-debug-collector --version: %s", err.Error())
}
// We are using MustCompile here, because hard-coded RE should not fail
re := regexp.MustCompile(TOOLNAME + `\n.*Version \d+\.\d+\.\d+\n`)
if !re.Match(out) {
t.Errorf("pt-k8s-debug-collector --version returns wrong result:\n%s", out)
}
}

View File

@@ -103,13 +103,9 @@ for my $char ( "\N{KATAKANA LETTER NI}", "\N{U+DF}" ) {
my $sql = qq{INSERT INTO `bug_1127450`.`original` VALUES (1, ?)};
$utf8_dbh->prepare($sql)->execute($char);
# We need to have --no-check-charset here, because utf8 that we use in the test file
# is alias of utf8mb3 in 5.7 and alias of utf8mb4 in 8.0.
# We cannot set this character set explicitly due to Perl limitations.
# Changing utf8 to utf8mb4 will break test on 5.7
$output = output(
sub { pt_archiver::main(qw(--no-ascend --limit 50 --bulk-insert),
qw(--bulk-delete --where 1=1 --statistics --charset utf8 --no-check-charset),
qw(--bulk-delete --where 1=1 --statistics --charset utf8),
'--source', "L=1,D=bug_1127450,t=original,F=$cnf",
'--dest', "t=copy") }, stderr => 1
);
@@ -139,32 +135,6 @@ for my $char ( "\N{KATAKANA LETTER NI}", "\N{U+DF}" ) {
"Warns about the UTF-8 bug in DBD::mysql::VERSION lt '4', quiet otherwise"
);
}
# #############################################################################
# PT-2123: pt-archiver gives error "Wide character in print at
# /usr/bin/pt-archiver line 6815" when using --bulk-insert
# #############################################################################
$sb->load_file('master', 't/pt-archiver/samples/pt-2123.sql');
$dbh->do('set names "utf8mb4"');
my $original_rows = $dbh->selectall_arrayref('select col2 from pt_2123.t1 where col1=5');
$output = output(
sub { pt_archiver::main(
'--source', 'L=1,h=127.1,P=12345,D=pt_2123,t=t1,u=msandbox,p=msandbox,A=utf8mb4',
'--dest', 'L=1,h=127.1,P=12345,D=pt_2123,t=t2,u=msandbox,p=msandbox,A=utf8mb4',
qw(--where col1=5 --bulk-insert --limit=100 --purge))
},
);
my $archived_rows = $dbh->selectall_arrayref('select col2 from pt_2123.t2');
is_deeply(
$original_rows,
$archived_rows,
"UTF8 characters copied successfully with --bulk-insert"
);
# #############################################################################
# Done.
# #############################################################################

View File

@@ -23,15 +23,11 @@ my $dbh = $sb->get_dbh_for('master');
if ( !$dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
}
elsif ($sandbox_version gt '5.7') {
plan tests => 4;
}
else {
plan tests => 5;
}
my $output;
my $archived_rows;
# #############################################################################
# Issue 1152: mk-archiver columns option resulting in null archived table data
@@ -61,6 +57,7 @@ SKIP: {
"--check-charset"
);
}
$output = output(
sub { pt_archiver::main(
'--source', 'h=127.1,P=12345,D=issue_1225,t=t,u=msandbox,p=msandbox',
@@ -69,13 +66,12 @@ $output = output(
},
);
$archived_rows = $dbh->selectall_arrayref('select c from issue_1225.a limit 2');
my $archived_rows = $dbh->selectall_arrayref('select c from issue_1225.a limit 2');
ok(
$original_rows->[0]->[0] ne $archived_rows->[0]->[0],
"UTF8 characters lost when cxn isn't also UTF8"
);
}
$sb->load_file('master', 't/pt-archiver/samples/issue_1225.sql');
@@ -83,7 +79,7 @@ $output = output(
sub { pt_archiver::main(
'--source', 'h=127.1,P=12345,D=issue_1225,t=t,u=msandbox,p=msandbox',
'--dest', 't=a',
qw(--no-check-charset --where 1=1 --purge -A utf8)) # -A utf8 makes it work
qw(--where 1=1 --purge -A utf8)) # -A utf8 makes it work
},
);

View File

@@ -33,7 +33,7 @@ elsif ( $DBD::mysql::VERSION lt '4' ) {
my $output;
my $rows;
my $cnf = "/tmp/12345/my.sandbox.cnf";
my $file = "/tmp/pt-archiver-file.txt";
my $file = "/tmp/mk-archiver-file.txt";
# #############################################################################
# Issue 1229: mk-archiver not creating UTF8 compatible file handles for
@@ -53,15 +53,11 @@ is_deeply(
diag(`rm -rf $file >/dev/null`);
# We need to have --no-check-charset here, because utf8 that we use in the test file
# is alias of utf8mb3 in 5.7 and alias of utf8mb4 in 8.0.
# We cannot set this character set explicitly due to Perl limitations.
# Changing utf8 to utf8mb4 will break test on 5.7
$output = output(
sub { pt_archiver::main(
'--source', 'h=127.1,P=12345,D=issue_1225,t=t,u=msandbox,p=msandbox',
'--file', $file,
qw(--no-check-charset --where 1=1 -A UTF8)) # -A utf8 makes it work
qw(--where 1=1 -A UTF8)) # -A utf8 makes it work
},
stderr => 1,
);

109
t/pt-archiver/pt-1898.t Normal file
View File

@@ -0,0 +1,109 @@
#!/usr/bin/env perl
BEGIN {
die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};
use strict;
use warnings FATAL => 'all';
use threads;
use English qw(-no_match_vars);
use Test::More;
use Data::Dumper;
use PerconaTest;
use Sandbox;
use SqlModes;
use File::Temp qw/ tempdir tempfile /;
if ($ENV{PERCONA_SLOW_BOX}) {
plan skip_all => 'This test needs a fast machine';
} else {
plan tests => 6;
#plan skip_all => 'This test is taking too much time even in fast machines';
}
our $delay = 15;
my $tmp_file = File::Temp->new();
my $tmp_file_name = $tmp_file->filename;
unlink $tmp_file_name;
require "$trunk/bin/pt-archiver";
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $master_dbh = $sb->get_dbh_for('master');
my $slave_dbh = $sb->get_dbh_for('slave1');
my $master_dsn = 'h=127.0.0.1,P=12345,u=msandbox,p=msandbox';
my $slave_dsn = 'h=127.0.0.1,P=12346,u=msandbox,p=msandbox';
sub reset_query_cache {
my @dbhs = @_;
return if ($sandbox_version >= '8.0');
foreach my $dbh (@dbhs) {
$dbh->do('RESET QUERY CACHE');
}
}
# 1) Set the slave delay to 0 just in case we are re-running the tests without restarting the sandbox.
# 2) Load sample data
# 3) Set the slave delay to 30 seconds to be able to see the 'waiting' message.
diag("Setting slave delay to 0 seconds");
$slave_dbh->do('STOP SLAVE');
$slave_dbh->do('RESET SLAVE');
$slave_dbh->do('START SLAVE');
diag('Loading test data');
$sb->load_file('master', "t/pt-online-schema-change/samples/pt-1898.sql");
my $num_rows = 5000;
diag("Loading $num_rows into the table. This might take some time.");
diag(`util/mysql_random_data_load --host=127.0.0.1 --port=12345 --user=msandbox --password=msandbox test pt178 $num_rows`);
diag("Setting slave delay to $delay seconds");
$slave_dbh->do('STOP SLAVE');
$slave_dbh->do("CHANGE MASTER TO MASTER_DELAY=$delay");
$slave_dbh->do('START SLAVE');
# Run a full table scan query to ensure the slave is behind the master
# There is no query cache in MySQL 8.0+
reset_query_cache($master_dbh, $master_dbh);
$master_dbh->do('UPDATE `test`.`joinit` SET g = g + 1 WHERE g <> 0');
# This is the base test, ust to ensure that without using --check-slave-lag nor --skip-check-slave-lag
# pt-online-schema-change will wait on the slave at port 12346
my $max_lag = $delay / 2;
my @args = ("--source", "$master_dsn,D=test,t=joinit", "--purge", "--where", "'g < 45'",
"--commit-each", "--limit", "500", "--statistics", "--check-slave-lag", "h=127.0.0.1,P=12346,u=msandbox,p=msandbox",
"--max-lag", "1");
diag("Starting base test. This is going to take some time due to the delay in the slave");
diag("pid: $tmp_file_name");
my $argstr = join(@args, " ");
my $output = `$trunk/bin/pt-online-schema-change $argstr 2>&1`;
like(
$output,
qr/Replica lag is \d+ seconds on .* Waiting/s,
"Base test waits on the correct slave",
);
diag("Setting slave delay to 0 seconds");
$slave_dbh->do('STOP SLAVE');
$slave_dbh->do('RESET SLAVE');
$slave_dbh->do('START SLAVE');
$master_dbh->do("DROP DATABASE IF EXISTS test");
# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($master_dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;

View File

@@ -1,286 +0,0 @@
#!/usr/bin/env perl
# Regression tests for PT-2114: pt-archiver incorrectly cast BIT columns,
# so rows matched through a BIT column in a --where subquery could be
# archived or purged wrongly.  Each section reloads the sample dump and
# verifies one archiving mode: --purge, --dest, --bulk-insert,
# --bulk-delete, --file, and finally multi-bit (bit(10)) values.
BEGIN {
die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};
use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;
use PerconaTest;
use Sandbox;
require "$trunk/bin/pt-archiver";
# Connect to the sandbox master; skip the whole file if it is not running.
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('master');
if ( !$dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
}
else {
# Fixed plan: 23 assertions below; update this count when adding tests.
plan tests => 23;
}
my $output;
# #############################################################################
# PT-2114: Incorrect casting of BIT columns by pt-archiver
# #############################################################################
# The --where subquery picks the BIT value held by t1_tmp row id=2, so only
# rows of t1 carrying that value should be purged; rows with val=0 survive.
# NOTE(review): exact row distribution is inferred from the sample dump --
# confirm against t/pt-archiver/samples/pt-2114.sql.
$sb->load_file('master', 't/pt-archiver/samples/pt-2114.sql');
# Baseline: the rows that must survive the purge.
my $zero_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t1 where val = 0');
my $exit_status;
$output = output(
sub { $exit_status = pt_archiver::main(
'--source', 'h=127.1,P=12345,D=pt_2114,t=t1,u=msandbox,p=msandbox,A=utf8mb4',
'--where', '(val) in (select a.val from pt_2114.t1_tmp a where id =2)',
'--purge')
},
);
is (
$exit_status,
0,
"PT-2114 exit status OK",
);
my $left_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t1');
is_deeply(
$zero_rows,
$left_rows,
"PT-2114 Only rows with val=0 left in the table"
);
my $count_rows = $dbh->selectrow_arrayref('select count(*) from pt_2114.t1');
is (
@{$count_rows}[0],
4,
"PT-2114 Four rows left in the table"
);
# #############################################################################
# Reloading dump to perform archiving
# #############################################################################
# Same scenario, but matching rows are archived into pt_2114.t2 via --dest
# instead of being purged.
$sb->load_file('master', 't/pt-archiver/samples/pt-2114.sql');
# Baseline: the rows that should end up in the destination table.
my $one_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t1 where val = 1');
$output = output(
sub { $exit_status = pt_archiver::main(
'--source', 'h=127.1,P=12345,D=pt_2114,t=t1,u=msandbox,p=msandbox,A=utf8mb4',
'--dest', 'h=127.1,P=12345,D=pt_2114,t=t2,u=msandbox,p=msandbox,A=utf8mb4',
'--where', '(val) in (select a.val from pt_2114.t1_tmp a where id =2)',
)
},
);
is (
$exit_status,
0,
"PT-2114 exit status OK",
);
$left_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t1');
is_deeply(
$zero_rows,
$left_rows,
"PT-2114 Only rows with val=0 left in the table"
);
$count_rows = $dbh->selectrow_arrayref('select count(*) from pt_2114.t1');
is (
@{$count_rows}[0],
4,
"PT-2114 Four rows left in the table"
);
my $archived_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t2');
is_deeply(
$one_rows,
$archived_rows,
"PT-2114 Correct rows archived"
);
# #############################################################################
# Reloading dump to perform archiving
# #############################################################################
# Archive-to-table again, now with --bulk-insert (LOAD DATA INFILE), which
# needs L=yes (local-infile) on both the source and destination DSNs.
$sb->load_file('master', 't/pt-archiver/samples/pt-2114.sql');
$output = output(
sub { $exit_status = pt_archiver::main(
'--source', 'h=127.1,P=12345,D=pt_2114,t=t1,u=msandbox,p=msandbox,A=utf8mb4,L=yes',
'--dest', 'h=127.1,P=12345,D=pt_2114,t=t2,u=msandbox,p=msandbox,A=utf8mb4,L=yes',
'--where', '(val) in (select a.val from pt_2114.t1_tmp a where id =2)',
'--bulk-insert', '--limit', '10')
},
);
is (
$exit_status,
0,
"PT-2114 exit status OK",
);
$left_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t1');
is_deeply(
$zero_rows,
$left_rows,
"PT-2114 Only rows with val=0 left in the table with --bulk-insert"
);
$count_rows = $dbh->selectrow_arrayref('select count(*) from pt_2114.t1');
is (
@{$count_rows}[0],
4,
"PT-2114 Four rows left in the table"
);
$archived_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t2');
is_deeply(
$one_rows,
$archived_rows,
"PT-2114 Correct rows archived with --bulk-insert"
);
# #############################################################################
# Reloading dump to perform archiving
# #############################################################################
# Purge matching rows using --bulk-delete (multi-row DELETE statements).
$sb->load_file('master', 't/pt-archiver/samples/pt-2114.sql');
$output = output(
sub { $exit_status = pt_archiver::main(
'--source', 'h=127.1,P=12345,D=pt_2114,t=t1,u=msandbox,p=msandbox,A=utf8mb4,L=yes',
'--where', '(val) in (select a.val from pt_2114.t1_tmp a where id =2)',
'--bulk-delete', '--purge', '--limit', '10')
},
);
is (
$exit_status,
0,
"PT-2114 exit status OK",
);
$left_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t1');
is_deeply(
$zero_rows,
$left_rows,
"PT-2114 Only rows with val=0 left in the table with --bulk-delete"
);
$count_rows = $dbh->selectrow_arrayref('select count(*) from pt_2114.t1');
is (
@{$count_rows}[0],
4,
"PT-2114 Four rows left in the table"
);
# #############################################################################
# Reloading dump to perform archiving
# #############################################################################
$sb->load_file('master', 't/pt-archiver/samples/pt-2114.sql');
# Archiving into a file
# '-c id' restricts the archived columns to `id`; the --file pattern expands
# %D (database) and %t (table) to produce "archive.pt_2114.t1".
$output = output(
sub { $exit_status = pt_archiver::main(
'--where', '(val) in (select a.val from pt_2114.t1_tmp a where id =2)',
'--source', 'h=127.1,P=12345,D=pt_2114,t=t1,u=msandbox,p=msandbox,A=utf8mb4,L=yes',
'--file', 'archive.%D.%t', '-c', 'id'
)
},
);
is (
$exit_status,
0,
"PT-2114 exit status OK",
);
ok(-f 'archive.pt_2114.t1', 'PT-2114 Archive file written OK');
$output = `cat archive.pt_2114.t1`;
# Expected ids of the archived rows.  NOTE(review): these ids follow from
# the sample dump's AUTO_INCREMENT=122 seed -- confirm against the dump.
is($output, <<EOF
123
125
128
130
EOF
, 'PT-2114 Correct rows archived into the file');
`rm -f archive.pt_2114.t1`;
$left_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t1');
is_deeply(
$zero_rows,
$left_rows,
"PT-2114 Only rows with val=0 left in the table after archiving into the file"
);
$count_rows = $dbh->selectrow_arrayref('select count(*) from pt_2114.t1');
is (
@{$count_rows}[0],
4,
"PT-2114 Four rows left in the table"
);
# #############################################################################
# Longer BIT values
# Loading dump to perform archiving
# #############################################################################
# pt-2114-2.sql uses bit(10) values; rows holding b'1111000010' must
# survive while rows holding the other value are purged.
$sb->load_file('master', 't/pt-archiver/samples/pt-2114-2.sql');
my $not_archived_rows = $dbh->selectall_arrayref("select id, hex(val) from pt_2114.t1 where val = b'1111000010'");
$output = output(
sub { $exit_status = pt_archiver::main(
'--source', 'h=127.1,P=12345,D=pt_2114,t=t1,u=msandbox,p=msandbox,A=utf8mb4',
'--where', '(val) in (select a.val from pt_2114.t1_tmp a where id =2)',
'--purge')
},
);
is (
$exit_status,
0,
"PT-2114 exit status OK",
);
$left_rows = $dbh->selectall_arrayref('select id, hex(val) from pt_2114.t1');
is_deeply(
$not_archived_rows,
$left_rows,
"PT-2114 Only rows with val=0 left in the table"
);
$count_rows = $dbh->selectrow_arrayref('select count(*) from pt_2114.t1');
is (
@{$count_rows}[0],
4,
"PT-2114 Four rows left in the table"
);
# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
exit;

View File

@@ -1,19 +0,0 @@
-- Sample data for the pt-archiver PT-2114 tests with multi-bit BIT values
-- (bit(10)).  The test selects the BIT value of t1_tmp row id=2 and
-- purges the matching rows from t1.
DROP DATABASE IF EXISTS pt_2114;
CREATE DATABASE pt_2114;
USE pt_2114;
-- Source table; the composite PRIMARY KEY includes the BIT column.
CREATE TABLE `pt_2114`.`t1` (
`id` int NOT NULL AUTO_INCREMENT,
`val` bit(10) NOT NULL DEFAULT b'0',
PRIMARY KEY (`id`,`val`)
) ENGINE=InnoDB AUTO_INCREMENT=122 DEFAULT CHARSET=utf8mb4;
CREATE TABLE pt_2114.t1_tmp LIKE t1;
-- Two distinct 10-bit values, doubled twice: 8 rows total, 4 of each value.
INSERT INTO pt_2114.t1 (val) VALUES (b'1111000010'),(b'1101001100');
INSERT INTO pt_2114.t1 (val) SELECT val FROM pt_2114.t1 ;
INSERT INTO pt_2114.t1 (val) SELECT val FROM pt_2114.t1 ;
-- t1_tmp mirrors t1's values; tests read the val of its row id=2.
INSERT INTO pt_2114.t1_tmp SELECT NULL, val FROM pt_2114.t1;
-- Archive destination.  NOTE(review): `val` is bit(1) here while t1 uses
-- bit(10); the bit(10) test only purges (no --dest), but archiving into t2
-- would truncate -- confirm the asymmetry is intentional.
CREATE TABLE `pt_2114`.`t2` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`val` bit(1) NOT NULL DEFAULT b'0',
PRIMARY KEY (`id`,`val`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

View File

@@ -1,19 +0,0 @@
-- Sample data for the pt-archiver PT-2114 tests (incorrect casting of
-- BIT columns).  The test selects the BIT value of t1_tmp row id=2 and
-- archives/purges the matching rows from t1.
DROP DATABASE IF EXISTS pt_2114;
CREATE DATABASE pt_2114;
USE pt_2114;
-- Source table; the composite PRIMARY KEY includes the bit(1) column.
CREATE TABLE `pt_2114`.`t1` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`val` bit(1) NOT NULL DEFAULT b'0',
PRIMARY KEY (`id`,`val`)
) ENGINE=InnoDB AUTO_INCREMENT=122 DEFAULT CHARSET=utf8mb4;
CREATE TABLE pt_2114.t1_tmp LIKE t1;
-- One row each of val=0 and val=1, doubled twice: 8 rows, 4 of each value.
INSERT INTO pt_2114.t1 (val) VALUES (0),(1);
INSERT INTO pt_2114.t1 (val) SELECT val FROM pt_2114.t1 ;
INSERT INTO pt_2114.t1 (val) SELECT val FROM pt_2114.t1 ;
-- t1_tmp mirrors t1's values; tests read the val of its row id=2.
INSERT INTO pt_2114.t1_tmp SELECT NULL, val FROM pt_2114.t1;
-- Archive destination table (same value domain as t1).
CREATE TABLE `pt_2114`.`t2` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`val` bit(1) NOT NULL DEFAULT b'0',
PRIMARY KEY (`id`,`val`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

View File

@@ -1,20 +0,0 @@
-- Sample data for the PT-2123 tests (utf8mb4 connection charset).
SET NAMES utf8mb4;
DROP DATABASE IF EXISTS pt_2123;
CREATE DATABASE pt_2123;
CREATE TABLE `pt_2123`.`t1` (
`col1` int(11) NOT NULL AUTO_INCREMENT,
`col2` varchar(3) DEFAULT NULL,
PRIMARY KEY (`col1`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4;
-- Destination table with an identical definition.
CREATE TABLE `pt_2123`.`t2` (
`col1` int(11) NOT NULL AUTO_INCREMENT,
`col2` varchar(3) DEFAULT NULL,
PRIMARY KEY (`col1`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4;
-- NOTE(review): the inserted col2 values render here as empty strings;
-- given SET NAMES utf8mb4 and varchar(3) they presumably held multi-byte
-- characters lost in this rendering -- verify against the original file.
insert into pt_2123.t1 (col2) values ('');
insert into pt_2123.t1 (col2) values ('');
insert into pt_2123.t1 (col2) values ('');
insert into pt_2123.t1 (col2) values ('');

View File

@@ -82,6 +82,8 @@ $sb->load_file('master', 't/pt-deadlock-logger/samples/deadlocks_tbl.sql', 'test
$output = `$trunk/bin/pt-deadlock-logger $dsn --dest D=test,t=deadlocks --daemonize --run-time 10 --interval 1 --pid $pid_file 1>/dev/null 2>/dev/null`;
#REMOVEME
`echo "test 3" >>/tmp/REMOVEME`;
PerconaTest::wait_for_files($pid_file);
$output = `ps x | grep 'pt-deadlock-logger $dsn' | grep -v grep`;
@@ -91,6 +93,8 @@ like(
'It lives daemonized'
) or diag($output);
#REMOVEME
`echo "test 4" >>/tmp/REMOVEME`;
my ($pid) = $output =~ /(\d+)/;
ok(

View File

@@ -43,9 +43,7 @@ my ($tool) = $PROGRAM_NAME =~ m/([\w-]+)\.t$/;
# mysqldump from earlier versions doesn't seem to work with 5.6,
# so use the actual mysqldump from each MySQL bin which should
# always be compatible with itself.
# We need LC_NUMERIC=POSIX, so the test does not fail in environments
# which use , instead of . for numbers.
my $env = qq\CMD_MYSQLDUMP="$ENV{PERCONA_TOOLKIT_SANDBOX}/bin/mysqldump" LC_NUMERIC=POSIX\;
my $env = qq\CMD_MYSQLDUMP="$ENV{PERCONA_TOOLKIT_SANDBOX}/bin/mysqldump"\;
#
# --save-samples

View File

@@ -45,9 +45,7 @@ my ($tool) = $PROGRAM_NAME =~ m/([\w-]+)_encryption\.t$/;
# mysqldump from earlier versions doesn't seem to work with 5.6,
# so use the actual mysqldump from each MySQL bin which should
# always be compatible with itself.
# We need LC_NUMERIC=POSIX, so the test does not fail in environments
# which use , instead of . for numbers.
my $env = qq\CMD_MYSQLDUMP="$ENV{PERCONA_TOOLKIT_SANDBOX}/bin/mysqldump" LC_NUMERIC=POSIX\;
my $env = qq\CMD_MYSQLDUMP="$ENV{PERCONA_TOOLKIT_SANDBOX}/bin/mysqldump"\;
#
# --save-samples
@@ -129,7 +127,7 @@ $master_dbh->do("CREATE TABLESPACE foo ADD DATAFILE 'foo.ibd' ENCRYPTION='Y'");
$master_dbh->do("ALTER TABLE test.t1 TABLESPACE=foo");
$master_dbh->do("CREATE TABLE test.t2(a INT PRIMARY KEY) ENCRYPTION='Y'");
$out = `bash $trunk/bin/$tool --list-encrypted-tables -- --defaults-file=/tmp/12345/my.sandbox.cnf`;
$out = `bash $trunk/bin/$tool --list-encrypted-tables`;
like(
$out,

View File

@@ -201,19 +201,6 @@ wsrep_cluster_size 100
HandlerSocket NoSQL | Not Supported
Fast Hash UDFs | Unknown
# Percona XtraDB Cluster #####################################
Cluster Name | pt_sandbox_cluster
Cluster Address | gcomm://
Cluster Size | 3
Cluster Nodes | 192.168.0.100,192.168.0.100,192.168.0.100
Node Name | 12345
Node Status | Primary
SST Method | rsync
Slave Threads | 2
Ignore Split Brain | false
Ignore Quorum | false
gcache Size | 128M
gcache Directory | /tmp/12345/data/
gcache Name | /tmp/12345/data//galera.cache
# Plugins ####################################################
InnoDB compression | ACTIVE
# Query cache ################################################

View File

@@ -167,7 +167,6 @@ wsrep_local_index 4000000000000 45000000
HandlerSocket NoSQL | Not Supported
Fast Hash UDFs | Unknown
# Percona XtraDB Cluster #####################################
wsrep_on | OFF
# Plugins ####################################################
InnoDB compression | ACTIVE
# Query cache ################################################

View File

@@ -201,19 +201,6 @@ wsrep_cluster_size 100
HandlerSocket NoSQL | Not Supported
Fast Hash UDFs | Unknown
# Percona XtraDB Cluster #####################################
Cluster Name | pt_sandbox_cluster
Cluster Address | gcomm://
Cluster Size | 3
Cluster Nodes | 192.168.0.100,192.168.0.100,192.168.0.100
Node Name | 12345
Node Status | Primary
SST Method | rsync
Slave Threads | 2
Ignore Split Brain | false
Ignore Quorum | false
gcache Size | 128M
gcache Directory | /tmp/12345/data/
gcache Name | /tmp/12345/data//galera.cache
# Plugins ####################################################
InnoDB compression | ACTIVE
# Query cache ################################################

View File

@@ -167,7 +167,6 @@ wsrep_local_index 4000000000000 45000000
HandlerSocket NoSQL | Not Supported
Fast Hash UDFs | Unknown
# Percona XtraDB Cluster #####################################
wsrep_on | OFF
# Plugins ####################################################
InnoDB compression | ACTIVE
# Query cache ################################################

View File

@@ -0,0 +1,36 @@
-- Sample data generator: builds test.joinit and grows it to ~32k random
-- rows by doubling the table with repeated self-INSERTs.
-- give master some advantage on performance
SET GLOBAL innodb_flush_log_at_trx_commit=2;
SET GLOBAL sync_binlog=1000000;
SET GLOBAL innodb_buffer_pool_size=2*1024*1024*1024;
-- Recreate the test database from scratch.
DROP DATABASE IF EXISTS test;
-- Fixed: the statement previously ended with a stray "." after the
-- semicolon ("CREATE DATABASE test;."), which the mysql client rejects
-- as a syntax error.
CREATE DATABASE test;
USE test;
DROP TABLE IF EXISTS `joinit`;
CREATE TABLE `joinit` (
`i` int(11) NOT NULL AUTO_INCREMENT,
`s` varchar(64) DEFAULT NULL,
`t` time NOT NULL,
`g` int(11) NOT NULL,
PRIMARY KEY (`i`),
KEY g_idx (g)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-- Seed one random row, then double the row count 15 times (2^15 = 32768).
INSERT INTO joinit VALUES (NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )));
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit; -- +256 rows
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit; -- +512 rows
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit; -- +1024 rows
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;
INSERT INTO joinit SELECT NULL, uuid(), time(now()), (FLOOR( 1 + RAND( ) *60 )) FROM joinit;

View File

@@ -1,82 +0,0 @@
#!/usr/bin/env perl
# Test that pt-stalk in --no-stalk mode captures SHOW SLAVE STATUS output
# from the target server (the replica listening on socket 12346).
BEGIN {
die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};
use strict;
use warnings FATAL => 'all';
use threads;
use English qw(-no_match_vars);
use Test::More;
use Time::HiRes qw(sleep);
use PerconaTest;
use DSNParser;
use Sandbox;
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('master');
if ( !$dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
}
# Scratch paths; $PID keeps concurrent test runs from clobbering each other.
my $cnf = "/tmp/12345/my.sandbox.cnf";
# NOTE(review): $replicacnf, $int_file, $pid and wait_n_cycles() are not
# referenced below -- likely leftovers from a larger pt-stalk test file;
# confirm before removing.
my $replicacnf = "/tmp/12346/my.sandbox.cnf";
my $pid_file = "/tmp/pt-stalk.pid.$PID";
my $log_file = "/tmp/pt-stalk.log.$PID";
my $dest = "/tmp/pt-stalk.collect.$PID";
my $int_file = "/tmp/pt-stalk-after-interval-sleep";
my $pid;
# Best-effort removal of scratch files and the collect directory.
sub cleanup {
diag(`rm $pid_file $log_file $int_file 2>/dev/null`);
diag(`rm -rf $dest 2>/dev/null`);
}
# Block until pt-stalk has completed at least $n collection cycles, as
# counted by lines appended to $dest/after_interval_sleep (polls every
# 1.5s, up to 15 tries).
sub wait_n_cycles {
my ($n) = @_;
PerconaTest::wait_until(
sub {
return 0 unless -f "$dest/after_interval_sleep";
my $n_cycles = `wc -l "$dest/after_interval_sleep" | awk '{print \$1}'`;
$n_cycles ||= '';
chomp($n_cycles);
return ($n_cycles || 0) >= $n;
},
1.5,
15
);
}
# ###########################################################################
# Test that SHOW SLAVE STATUS outputs are captured
# ###########################################################################
# Run a single collection cycle against the replica's socket and verify
# that the collected nostalk-slave-status file shows a running IO thread.
my $retval = system("$trunk/bin/pt-stalk --no-stalk --run-time 1 --dest $dest --prefix nostalk --pid $pid_file --iterations 1 -- --defaults-file=$cnf --socket=/tmp/12346/mysql_sandbox12346.sock >$log_file 2>&1");
my $output = `cat $dest/nostalk-slave-status|grep Slave_IO_Running`;
like(
$output,
qr/Slave_IO_Running: Yes/,
"SHOW SLAVE STATUS outputs gathered."
);
is(
$retval >> 8,
0,
"Exit 0"
);
# #############################################################################
# Done.
# #############################################################################
cleanup();
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
done_testing;

View File

@@ -37,6 +37,8 @@ $sb->create_dbs($master_dbh, [qw(test)]);
eval { $master_dbh->do('DROP FUNCTION IF EXISTS fnv_64'); };
eval { $master_dbh->do("CREATE FUNCTION fnv_64 RETURNS INTEGER SONAME 'libfnv_udf.so';"); };
if ( $EVAL_ERROR ) {
#REMOVEME
print $EVAL_ERROR;
chomp $EVAL_ERROR;
plan skip_all => "No FNV_64 UDF lib"
}

View File

@@ -46,7 +46,7 @@ my $extra_tables = $dbh->selectrow_arrayref("select count(*) from percona_test.c
is(
PerconaTest::count_checksum_results($output, 'rows'),
$sandbox_version ge '8.0' ? 28 + $extra_tables : $sandbox_version lt '5.7' ? 24 : 23 + $extra_tables,
$sandbox_version ge '8.0' ? 27 + $extra_tables : $sandbox_version lt '5.7' ? 24 : 23 + $extra_tables,
"Large BLOB/TEXT/BINARY Checksum"
);

View File

@@ -1,68 +0,0 @@
#!/usr/bin/env perl
# Regression test for PT-1059 (LP #1093972): pt-table-checksum must be
# able to parse index and column names that contain newline characters.
BEGIN {
die "The PERCONA_TOOLKIT_BRANCH environment variable is not set.\n"
unless $ENV{PERCONA_TOOLKIT_BRANCH} && -d $ENV{PERCONA_TOOLKIT_BRANCH};
unshift @INC, "$ENV{PERCONA_TOOLKIT_BRANCH}/lib";
};
use strict;
use warnings FATAL => 'all';
use English qw(-no_match_vars);
use Test::More;
use PerconaTest;
use Sandbox;
use SqlModes;
require "$trunk/bin/pt-table-checksum";
# Connect to the sandbox master; skip the whole file if it is not running.
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);
my $dbh = $sb->get_dbh_for('master');
if ( !$dbh ) {
plan skip_all => 'Cannot connect to sandbox master';
}
else {
plan tests => 3;
}
# The sandbox servers run with lock_wait_timeout=3 and it's not dynamic
# so we need to specify --set-vars innodb_lock_wait_timeout=3 else the tool will die.
# And --max-load "" prevents waiting for status variables.
my $master_dsn = 'h=127.1,P=12345,u=msandbox,p=msandbox,D=pt_1059';
my @args = ($master_dsn, qw(--set-vars innodb_lock_wait_timeout=3), '--max-load', '');
my $output;
my $exit_status;
# We test that checksum works with columns and indexes
# that contain new lines
$sb->load_file('master', 't/pt-table-checksum/samples/pt-1059.sql');
# #############################################################################
# PT-1059 LP #1093972: Tools can't parse index names containing newlines
# #############################################################################
($output, $exit_status) = full_output(
sub { pt_table_checksum::main(@args, qw(-d pt_1059)) },
stderr => 1,
);
is(
PerconaTest::count_checksum_results($output, 'errors'),
0,
"Checksum with columns and indexes, containing new lines found no errors"
);
is(
$exit_status,
0,
# Fixed typo in the test description ("succesfully" -> "successfully").
"Checksum with columns and indexes, containing new lines finished successfully",
);
# #############################################################################
# Done.
# #############################################################################
$sb->wipe_clean($dbh);
ok($sb->ok(), "Sandbox servers") or BAIL_OUT(__FILE__ . " broke the sandbox");
exit;

View File

@@ -16,8 +16,6 @@ use Sandbox;
use SqlModes;
require "$trunk/bin/pt-table-checksum";
plan skip_all => 'Disabled until PT-2174 is fixed';
my $dp = new DSNParser(opts=>$dsn_opts);
my $sb = new Sandbox(basedir => '/tmp', DSNParser => $dp);

View File

@@ -54,6 +54,8 @@ $output = output(
stderr => 1,
);
#REMOVEME
diag($exit_status);
isnt(
$exit_status,
0,

View File

@@ -1,47 +0,0 @@
CREATE SCHEMA IF NOT EXISTS pt_1059;
USE pt_1059;
DROP TABLE IF EXISTS t1;
CREATE TABLE `t1` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`c` char(1) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `idx_with_
newline` (`c`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
INSERT INTO t1 (c) VALUES('a'),('b'),('c');
DROP TABLE IF EXISTS t2;
CREATE TABLE `t2` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`column_with_
newline` char(1) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `idx_c` (`column_with_
newline`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
INSERT INTO t2 (`column_with_
newline`) VALUES('a'),('b'),('c');
DROP TABLE IF EXISTS t3;
CREATE TABLE `t3` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`column_with_
newline` char(1) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `idx_with_
newline` (`column_with_
newline`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
INSERT INTO t3 (`column_with_
newline`) VALUES('a'),('b'),('c');
DROP TABLE IF EXISTS t4;
CREATE TABLE `t4` (
`
column, starting from new line` char(1)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
INSERT INTO t4 VALUES('a'),('b'),('c');