Merge branch '3.0' into PT-143

Carlos Salguero
2017-05-23 16:59:30 -03:00
11 changed files with 226 additions and 56 deletions

View File

@@ -1,5 +1,11 @@
Changelog for Percona Toolkit
v3.0.4
* Fixed bug PT-142 : pt-online-schema-change find_child_tables slow
* Fixed bug PT-138 : Added --output-format option to pt-mongodb-summary
* Feature PT-141 : pt-archiver archive records into csv file
v3.0.3
* Fixed bug PT-133 : Sandbox won't start correctly if autocommit=0 in my.cnf
@@ -8,15 +14,17 @@ v3.0.3
* Fixed bug PT-128 : pt-stalk ps include memory usage outputs
* Fixed bug PT-126 : Recognize comments in ALTER
* Fixed bug PT-116 : pt-online-schema change eats data on adding a unique index. Added --[no]use-insert-ignore
* Feature PT-115 : Make DSNs params able to be repeatable
* Fixed bug PT-115 : Made OptionParser to accept repeatable DSNs
* Fixed bug PT-111 : Collect MySQL variables
* Fixed bug PT-087 : Add --skip-check-slave-lag to pt-table-checksum
* Fixed bug PT-086 : Added --skip-check-slave-lag to pt-osc
* Fixed bug PT-080 : Added support for slave status in pt-stalk
* Feature PT-115 : Make DSNs params able to be repeatable
v3.0.2 released 2017-03-23
* Fixed bug PT-101 : pt-table-checksum ignores slave-user and slave-password
* Fixed bug PT-105 : pt-table-checksum fails if a database is dropped while the tool is running
* Fixed bug PT-73 : pt-mongodb tools add support for SSL connections
* Fixed bug PT-74 : pt-mongodb-summary Cannot get security settings when connected to a mongod instance
* Fixed bug PT-75 : pt-mongodb-query-digest Change the default sort order to -count (descending)
@@ -27,8 +35,6 @@ v3.0.2 released 2017-03-23
* Fixed bug PT-93 : Fix pt-mongodb-query-digest query ID (Thanks Kamil Dziedzic)
* Fixed bug PT-94 : pt-online-schema-change makes duplicate rows in _t_new for UPDATE t set pk=0 where pk=1
* Fixed bug PT-96 : Fixed PT tests
* Fixed bug PT-101 : pt-table-checksum ignores slave-user and slave-password
* Fixed bug PT-105 : pt-table-checksum fails if a database is dropped while the tool is running
v3.0.1 released 2017-02-16

View File

@@ -5893,6 +5893,8 @@ my $get_sth;
my ( $OUT_OF_RETRIES, $ROLLED_BACK, $ALL_IS_WELL ) = ( 0, -1, 1 );
my ( $src, $dst );
my $pxc_version = '0';
my $fields_separated_by = "\t";
my $optionally_enclosed_by;
# Holds the arguments for the $sth's bind variables, so it can be re-tried
# easily.
@@ -6520,12 +6522,19 @@ sub main {
# Open the file and print the header to it.
if ( $archive_file ) {
if ($o->got('output-format') && $o->get('output-format') ne 'dump' && $o->get('output-format') ne 'csv') {
warn "Invalid output format:". $o->get('format');
warn "Using default 'dump' format";
} elsif (($o->get('output-format') || '') eq 'csv') {
$fields_separated_by = ", ";
$optionally_enclosed_by = '"';
}
my $need_hdr = $o->get('header') && !-f $archive_file;
$archive_fh = IO::File->new($archive_file, ">>$charset")
or die "Cannot open $charset $archive_file: $OS_ERROR\n";
$archive_fh->autoflush(1) unless $o->get('buffer');
if ( $need_hdr ) {
print { $archive_fh } '', escape(\@sel_cols), "\n"
print { $archive_fh } '', escape(\@sel_cols, $fields_separated_by, $optionally_enclosed_by), "\n"
or die "Cannot write to $archive_file: $OS_ERROR\n";
}
}
@@ -6570,7 +6579,7 @@ sub main {
# problem, hopefully the data has at least made it to the file.
my $escaped_row;
if ( $archive_fh || $bulkins_file ) {
$escaped_row = escape([@{$row}[@sel_slice]]);
$escaped_row = escape([@{$row}[@sel_slice]], $fields_separated_by, $optionally_enclosed_by);
}
if ( $archive_fh ) {
trace('print_file', sub {
@@ -7027,11 +7036,18 @@ sub do_with_retries {
# described in the LOAD DATA INFILE section of the MySQL manual,
# http://dev.mysql.com/doc/refman/5.0/en/load-data.html
sub escape {
my ($row) = @_;
my ($row, $fields_separated_by, $optionally_enclosed_by) = @_;
return join("\t", map {
$fields_separated_by ||= "\t";
$optionally_enclosed_by ||= '';
return join($fields_separated_by, map {
s/([\t\n\\])/\\$1/g if defined $_; # Escape tabs etc
defined $_ ? $_ : '\N'; # NULL = \N
$_ = defined $_ ? $_ : '\N'; # NULL = \N
# var & ~var will return 0 only for numbers
$_ =~ s/([^\\])"/$1\\"/g if ($_ !~ /^[0-9,.E]+$/ && $optionally_enclosed_by eq '"');
$_ = $optionally_enclosed_by && $_ & ~$_ ? $optionally_enclosed_by."$_".$optionally_enclosed_by : $_;
} @$row);
}
sub ts {
@@ -7652,6 +7668,17 @@ Runs OPTIMIZE TABLE after finishing. See L<"--analyze"> for the option syntax
and L<http://dev.mysql.com/doc/en/optimize-table.html> for details on OPTIMIZE
TABLE.
=item --output-format
type: string
Used with L<"--file"> to specify the output format.
Valid formats are:
dump: MySQL dump format using tabs as field separator (default)
csv : Dump rows using ',' as the separator and optionally enclosing fields in '"'.
This format is equivalent to FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' (a minimal escaping sketch follows this file's diff).
=item --password
short form: -p; type: string
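The new --output-format handling above boils down to two values handed to escape(): a field separator and an optional enclosing character. They default to a tab and nothing (the dump format) and switch to ", " and '"' for csv. The standalone Perl sketch below only illustrates that escaping and is not the tool's code path; escape_row() is a hypothetical stand-in, and its numeric check is simplified compared to the patch.

#!/usr/bin/env perl
# Minimal sketch of the dump vs. csv rendering added by PT-141.
# escape_row() is a hypothetical stand-in for pt-archiver's escape().
use strict;
use warnings;

sub escape_row {
    my ($row, $sep, $quote) = @_;
    $sep   ||= "\t";    # dump format: tab-separated, nothing quoted
    $quote ||= '';      # csv format passes ", " and '"'
    return join($sep, map {
        my $v = $_;
        if (defined $v) {
            $v =~ s/([\t\n\\])/\\$1/g;    # escape tabs, newlines, backslashes
            $v =~ s/"/\\"/g if $quote;    # escape the quote character itself
        }
        else {
            $v = '\N';                    # NULL becomes \N, as in LOAD DATA INFILE
        }
        # Quote only non-numeric values when an enclosing character is set
        ($quote ne '' && $v !~ /^[0-9.,E]+$/) ? qq{$quote$v$quote} : $v;
    } @$row);
}

print escape_row([1, undef, 'Zapp "Brannigan']), "\n";             # dump (default)
print escape_row([1, undef, 'Zapp "Brannigan'], ', ', '"'), "\n";  # csv

In the patch itself the defaults are set once in main(), and csv mode uses ", " (comma and space) as the separator, which is why the new pt-archiver test expects lines such as 1, 2, 3, "4".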

View File

@@ -8770,6 +8770,7 @@ sub main {
tbl => $orig_tbl,
Cxn => $cxn,
Quoter => $q,
only_same_schema_fks => $o->get('only-same-schema-fks'),
);
if ( !$child_tables ) {
if ( $alter_fk_method ) {
@@ -10453,6 +10454,11 @@ sub find_child_tables {
. "FROM information_schema.key_column_usage " . "FROM information_schema.key_column_usage "
. "WHERE referenced_table_schema='$tbl->{db}' " . "WHERE referenced_table_schema='$tbl->{db}' "
. "AND referenced_table_name='$tbl->{tbl}'"; . "AND referenced_table_name='$tbl->{tbl}'";
if ($args{only_same_schema_fks}) {
$sql .= " AND table_schema='$tbl->{db}'";
}
PTDEBUG && _d($sql);
my $rows = $cxn->dbh()->selectall_arrayref($sql);
if ( !$rows || !@$rows ) {
@@ -11874,6 +11880,13 @@ them. The rows which contain NULL values will be converted to the defined
default value. If no explicit DEFAULT value is given MySQL will assign a default
value based on datatype, e.g. 0 for number datatypes, '' for string datatypes.
=item --only-same-schema-fks
Check foreign keys only on tables in the same schema as the original table.
This option is dangerous: if you have FKs referencing tables in other
schemas, they won't be detected (a sketch of the resulting query follows this file's diff).
=item --password
short form: -p; type: string
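For --only-same-schema-fks, the change in find_child_tables() above is a single extra predicate on the information_schema lookup. The Perl sketch below shows the query that gets built; child_tables_sql() is a hypothetical helper, the SELECT list is assumed for illustration, and only the WHERE clause is taken from the hunk above.

#!/usr/bin/env perl
# Sketch of the child-table query built with and without --only-same-schema-fks.
use strict;
use warnings;

sub child_tables_sql {
    my (%args) = @_;
    my $tbl = $args{tbl};
    my $sql = "SELECT table_schema, table_name "
            . "FROM information_schema.key_column_usage "
            . "WHERE referenced_table_schema='$tbl->{db}' "
            . "AND referenced_table_name='$tbl->{tbl}'";
    # New: with --only-same-schema-fks, ignore child tables in other schemas
    $sql .= " AND table_schema='$tbl->{db}'" if $args{only_same_schema_fks};
    return $sql;
}

print child_tables_sql(
    tbl                  => { db => 'bug_1315130_a', tbl => 'parent_table' },
    only_same_schema_fks => 1,
), "\n";

That extra predicate is what makes the new pt-online-schema-change test expect only bug_1315130_a.child_table_in_same_schema, and it is also why the option is documented as dangerous: a child table in another schema is silently skipped.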

View File

@@ -26,3 +26,4 @@ log-error = /tmp/PORT/data/mysqld.log
innodb_lock_wait_timeout = 3
general_log
general_log_file = genlog
secure-file-priv =

View File

@@ -1,11 +1,13 @@
package main
import (
"bytes"
"encoding/json"
"fmt" "fmt"
"html/template"
"net" "net"
"os" "os"
"strings" "strings"
"text/template"
"time" "time"
version "github.com/hashicorp/go-version" version "github.com/hashicorp/go-version"
@@ -33,6 +35,7 @@ const (
DEFAULT_LOGLEVEL = "warn"
DEFAULT_RUNNINGOPSINTERVAL = 1000 // milliseconds
DEFAULT_RUNNINGOPSSAMPLES = 5
DEFAULT_OUTPUT_FORMAT = "text"
)
var (
@@ -130,12 +133,24 @@ type options struct {
Version bool
NoVersionCheck bool
NoRunningOps bool
OutputFormat string
RunningOpsSamples int
RunningOpsInterval int
SSLCAFile string
SSLPEMKeyFile string
}
type collectedInfo struct {
BalancerStats *proto.BalancerStats
ClusterWideInfo *clusterwideInfo
OplogInfo []proto.OplogInfo
ReplicaMembers []proto.Members
RunningOps *opCounters
SecuritySettings *security
HostInfo *hostInfo
Errors []string
}
func main() {
opts, err := parseFlags()
@@ -210,77 +225,105 @@ func main() {
defer session.Close()
session.SetMode(mgo.Monotonic, true)
hostInfo, err := GetHostinfo(session)
ci := &collectedInfo{}
ci.HostInfo, err = GetHostinfo(session)
if err != nil {
message := fmt.Sprintf("Cannot get host info for %q: %s", di.Addrs[0], err.Error())
log.Errorf(message)
os.Exit(2)
}
if replicaMembers, err := util.GetReplicasetMembers(dialer, di); err != nil {
if ci.ReplicaMembers, err = util.GetReplicasetMembers(dialer, di); err != nil {
log.Warnf("[Error] cannot get replicaset members: %v\n", err)
os.Exit(2)
} else {
log.Debugf("replicaMembers:\n%+v\n", replicaMembers)
t := template.Must(template.New("replicas").Parse(templates.Replicas))
t.Execute(os.Stdout, replicaMembers)
}
log.Debugf("replicaMembers:\n%+v\n", ci.ReplicaMembers)
// Host Info
t := template.Must(template.New("hosttemplateData").Parse(templates.HostInfo))
t.Execute(os.Stdout, hostInfo)
if opts.RunningOpsSamples > 0 && opts.RunningOpsInterval > 0 {
if rops, err := GetOpCountersStats(session, opts.RunningOpsSamples, time.Duration(opts.RunningOpsInterval)*time.Millisecond); err != nil {
if ci.RunningOps, err = GetOpCountersStats(session, opts.RunningOpsSamples, time.Duration(opts.RunningOpsInterval)*time.Millisecond); err != nil {
log.Printf("[Error] cannot get Opcounters stats: %v\n", err)
} else {
t := template.Must(template.New("runningOps").Parse(templates.RunningOps))
t.Execute(os.Stdout, rops)
}
}
if hostInfo != nil {
if ci.HostInfo != nil {
if security, err := GetSecuritySettings(session, hostInfo.Version); err != nil {
if ci.SecuritySettings, err = GetSecuritySettings(session, ci.HostInfo.Version); err != nil {
log.Errorf("[Error] cannot get security settings: %v\n", err)
} else {
t := template.Must(template.New("ssl").Parse(templates.Security))
t.Execute(os.Stdout, security)
}
} else {
log.Warn("Cannot check security settings since host info is not available (permissions?)")
}
if oplogInfo, err := oplog.GetOplogInfo(hostnames, di); err != nil {
if ci.OplogInfo, err = oplog.GetOplogInfo(hostnames, di); err != nil {
log.Info("Cannot get Oplog info: %v\n", err)
} else {
if len(oplogInfo) > 0 {
if len(ci.OplogInfo) == 0 {
t := template.Must(template.New("oplogInfo").Parse(templates.Oplog))
t.Execute(os.Stdout, oplogInfo[0])
} else {
log.Info("oplog info is empty. Skipping")
} else {
ci.OplogInfo = ci.OplogInfo[:1]
}
}
// individual servers won't know about this info
if hostInfo.NodeType == "mongos" {
if ci.HostInfo.NodeType == "mongos" {
if cwi, err := GetClusterwideInfo(session); err != nil {
if ci.ClusterWideInfo, err = GetClusterwideInfo(session); err != nil {
log.Printf("[Error] cannot get cluster wide info: %v\n", err)
} else {
t := template.Must(template.New("clusterwide").Parse(templates.Clusterwide))
t.Execute(os.Stdout, cwi)
}
}
if hostInfo.NodeType == "mongos" {
if ci.HostInfo.NodeType == "mongos" {
if bs, err := GetBalancerStats(session); err != nil {
if ci.BalancerStats, err = GetBalancerStats(session); err != nil {
log.Printf("[Error] cannot get balancer stats: %v\n", err)
} else {
t := template.Must(template.New("balancer").Parse(templates.BalancerStats))
t.Execute(os.Stdout, bs)
}
}
out, err := formatResults(ci, opts.OutputFormat)
if err != nil {
log.Errorf("Cannot format the results: %s", err.Error())
os.Exit(1)
}
fmt.Println(string(out))
}
func formatResults(ci *collectedInfo, format string) ([]byte, error) {
var buf *bytes.Buffer
switch format {
case "json":
b, err := json.MarshalIndent(ci, "", " ")
if err != nil {
return nil, fmt.Errorf("[Error] Cannot convert results to json: %s", err.Error())
}
buf = bytes.NewBuffer(b)
default:
buf = new(bytes.Buffer)
t := template.Must(template.New("replicas").Parse(templates.Replicas))
t.Execute(buf, ci.ReplicaMembers)
t = template.Must(template.New("hosttemplateData").Parse(templates.HostInfo))
t.Execute(buf, ci.HostInfo)
t = template.Must(template.New("runningOps").Parse(templates.RunningOps))
t.Execute(buf, ci.RunningOps)
t = template.Must(template.New("ssl").Parse(templates.Security))
t.Execute(buf, ci.SecuritySettings)
if ci.OplogInfo != nil && len(ci.OplogInfo) > 0 {
t = template.Must(template.New("oplogInfo").Parse(templates.Oplog))
t.Execute(buf, ci.OplogInfo[0])
}
t = template.Must(template.New("clusterwide").Parse(templates.Clusterwide))
t.Execute(buf, ci.ClusterWideInfo)
t = template.Must(template.New("balancer").Parse(templates.BalancerStats))
t.Execute(buf, ci.BalancerStats)
}
return buf.Bytes(), nil
}
func GetHostinfo(session pmgo.SessionManager) (*hostInfo, error) {
@@ -472,6 +515,7 @@ func GetSecuritySettings(session pmgo.SessionManager, ver string) (*security, er
// Lets try both
newSession := session.Clone()
defer newSession.Close()
newSession.SetMode(mgo.Strong, true)
if s.Users, s.Roles, err = getUserRolesCount(newSession); err != nil {
@@ -811,6 +855,7 @@ func parseFlags() (*options, error) {
RunningOpsSamples: DEFAULT_RUNNINGOPSSAMPLES,
RunningOpsInterval: DEFAULT_RUNNINGOPSINTERVAL, // milliseconds
AuthDB: DEFAULT_AUTHDB,
OutputFormat: DEFAULT_OUTPUT_FORMAT,
}
gop := getopt.New()
@@ -821,8 +866,9 @@ func parseFlags() (*options, error) {
gop.StringVarLong(&opts.User, "username", 'u', "", "Username to use for optional MongoDB authentication")
gop.StringVarLong(&opts.Password, "password", 'p', "", "Password to use for optional MongoDB authentication").SetOptional()
gop.StringVarLong(&opts.AuthDB, "authenticationDatabase", 'a', "admin",
"Databaae to use for optional MongoDB authentication. Default: admin")
"Database to use for optional MongoDB authentication. Default: admin")
gop.StringVarLong(&opts.LogLevel, "log-level", 'l', "error", "Log level: panic, fatal, error, warn, info, debug. Default: error")
gop.StringVarLong(&opts.OutputFormat, "output-format", 'f', "text", "Output format: text, json. Default: text")
gop.IntVarLong(&opts.RunningOpsSamples, "running-ops-samples", 's',
fmt.Sprintf("Number of samples to collect for running ops. Default: %d", opts.RunningOpsSamples))
@@ -852,6 +898,9 @@ func parseFlags() (*options, error) {
gop.PrintUsage(os.Stdout)
return nil, nil
}
if opts.OutputFormat != "json" && opts.OutputFormat != "text" {
log.Infof("Invalid output format '%s'. Using text format", opts.OutputFormat)
}
return opts, nil
}

View File

@@ -8,6 +8,7 @@ import (
"testing" "testing"
"time" "time"
mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/bson"
"gopkg.in/mgo.v2/dbtest" "gopkg.in/mgo.v2/dbtest"
@@ -205,6 +206,9 @@ func TestSecurityOpts(t *testing.T) {
session.EXPECT().DB("admin").Return(database)
database.EXPECT().Run(bson.D{{"getCmdLineOpts", 1}, {"recordStats", 1}}, gomock.Any()).SetArg(1, cmd)
session.EXPECT().Clone().Return(session)
session.EXPECT().SetMode(mgo.Strong, true)
session.EXPECT().DB("admin").Return(database)
database.EXPECT().C("system.users").Return(usersCol)
usersCol.EXPECT().Count().Return(1, nil)
@@ -212,6 +216,7 @@ func TestSecurityOpts(t *testing.T) {
session.EXPECT().DB("admin").Return(database)
database.EXPECT().C("system.roles").Return(rolesCol)
rolesCol.EXPECT().Count().Return(2, nil)
session.EXPECT().Close().Return()
got, err := GetSecuritySettings(session, "3.2")
@@ -395,18 +400,22 @@ func TestParseArgs(t *testing.T) {
Host: DEFAULT_HOST,
LogLevel: DEFAULT_LOGLEVEL,
AuthDB: DEFAULT_AUTHDB,
RunningOpsSamples: DEFAULT_RUNNINGOPSSAMPLES,
RunningOpsInterval: DEFAULT_RUNNINGOPSINTERVAL,
OutputFormat: "text",
},
},
{
args: []string{TOOLNAME, "zapp.brannigan.net:27018/samples", "--help"},
want: &options{
want: nil,
Host: "zapp.brannigan.net:27018/samples",
LogLevel: DEFAULT_LOGLEVEL,
AuthDB: DEFAULT_AUTHDB,
Help: true,
},
},
}
// Capture stdout so the help output is not shown
old := os.Stdout // keep backup of the real stdout
_, w, _ := os.Pipe()
os.Stdout = w
for i, test := range tests {
getopt.Reset()
os.Args = test.args
@@ -419,4 +428,6 @@ func TestParseArgs(t *testing.T) {
}
}
os.Stdout = old
}

View File

@@ -52,7 +52,7 @@ my $want = [
key_len => 2,
ref => 'const',
rows => 1,
Extra => $sandbox_version gt '5.6' ? undef : '',
Extra => $sandbox_version eq '5.6' ? undef : '',
},
];
if ( $sandbox_version gt '5.6' ) {

View File

@@ -115,6 +115,25 @@ like(
"..but an unknown charset fails" "..but an unknown charset fails"
); );
local $SIG{__WARN__} = undef;
$sb->load_file('master', 't/pt-archiver/samples/table2.sql');
`rm -f archive.test.table_2`;
$output = output(
sub { pt_archiver::main(qw(--where 1=1 --output-format=csv), "--source", "D=test,t=table_2,F=$cnf", "--file", 'archive.%D.%t') },
);
$output = `cat archive.test.table_2`;
is($output, <<EOF
1, 2, 3, "4"
2, "\\N", 3, "4"
3, 2, 3, "\\\t"
4, 2, 3, "\\\n"
5, 2, 3, "Zapp \\"Brannigan"
EOF
, '--output-format=csv');
`rm -f archive.test.table_2`;
# #############################################################################
# Done.
# #############################################################################

View File

@@ -0,0 +1,19 @@
CREATE SCHEMA IF NOT EXISTS test;
use test;
drop table if exists table_2;
create table table_2(
a int not null primary key,
b int,
c int not null,
d varchar(50),
key(b)
) engine=innodb;
insert into table_2 values
(1, 2, 3, 4),
(2, null, 3, 4),
(3, 2, 3, "\t"),
(4, 2, 3, "\n"),
(5, 2, 3, "Zapp \"Brannigan");

View File

@@ -445,6 +445,31 @@ $output = output(
# clear databases with their foreign keys
$sb->load_file('master', "$sample/bug-1315130_cleanup.sql");
# #############################################################################
# Issue 1315130
# Failed to detect child tables in other schema, and falsely identified
# child tables in own schema
# #############################################################################
$sb->load_file('master', "$sample/bug-1315130_cleanup.sql");
$sb->load_file('master', "$sample/bug-1315130.sql");
$output = output(
sub { pt_online_schema_change::main(@args, "$master_dsn,D=bug_1315130_a,t=parent_table",
'--dry-run',
'--alter', "add column c varchar(16)",
'--alter-foreign-keys-method', 'auto', '--only-same-schema-fks'),
},
);
like(
$output,
qr/Child tables:\s*`bug_1315130_a`\.`child_table_in_same_schema` \(approx\. 1 rows\)[^`]*?Will/s,
"Ignore child tables in other schemas.",
);
# clear databases with their foreign keys
$sb->load_file('master', "$sample/bug-1315130_cleanup.sql");
# #############################################################################
# Issue 1340728

View File

@@ -105,7 +105,7 @@ ok(
$row = $master_dbh->selectrow_arrayref("select count(*) from percona.checksums");
my $max_rows = $sandbox_version < '5.7' ? 75 : 100;
my $max_rows = $sandbox_version < '5.7' ? 90 : 100;
ok(
$row->[0] >= 75 && $row->[0] <= $max_rows,
'Between 75 and 90 chunks on master'