Merge lp:~percona-toolkit-dev/percona-toolkit/test-pt-osc-master-to-cluster, aka official PXC support and automated testing.

Daniel Nichter
2013-03-08 14:15:16 -07:00
9 changed files with 180 additions and 65 deletions

View File

@@ -5395,7 +5395,7 @@ pt-heartbeat - Monitor MySQL replication delay.
=head1 SYNOPSIS
-Usage: pt-heartbeat [OPTION...] [DSN] --update|--monitor|--check|--stop
+Usage: pt-heartbeat [OPTIONS] [DSN] --update|--monitor|--check|--stop
pt-heartbeat measures replication lag on a MySQL or PostgreSQL server. You can
use it to update a master or monitor a replica. If possible, MySQL connection
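For context, the synopsis above implies the usual two-sided deployment: --update runs against the master, --check or --monitor against a replica. A minimal sketch (host names and the --database value are placeholders, not part of this commit):

   # On the master: keep the heartbeat row updated in the background.
   pt-heartbeat --update --database percona -h master.example.com --daemonize

   # On a replica: report the lag once with --check, or continuously with --monitor.
   pt-heartbeat --check   --database percona -h replica.example.com
   pt-heartbeat --monitor --database percona -h replica.example.com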

View File

@@ -341,7 +341,7 @@ sub wait_for_slaves {
sub {
   my ($pong) = $slave2_dbh->selectrow_array(
      "SELECT ping FROM percona_test.sentinel WHERE id=1 /* wait_for_slaves */");
-   return $ping eq $pong;
+   return $ping eq ($pong || '');
}, undef, 300
);
}

View File

@@ -41,11 +41,13 @@ else
ARCH="32" ARCH="32"
fi fi
APP="${FORK:-"mysql"}"
MYSQL_BIN_DIR="$HOME/mysql-bin" MYSQL_BIN_DIR="$HOME/mysql-bin"
[ -d "$MYSQL_BIN_DIR" ] || mkdir "$MYSQL_BIN_DIR" [ -d "$MYSQL_BIN_DIR" ] || mkdir "$MYSQL_BIN_DIR"
find_mysql_base_dir() { find_mysql_base_dir() {
find "$MYSQL_BIN_DIR" -name "mysql-$1*" -type d | tail -n 1 find "$MYSQL_BIN_DIR" -name "$APP-$1*" -type d | tail -n 1
} }
MYSQL_BASE_DIR="$(find_mysql_base_dir $MYSQL)" MYSQL_BASE_DIR="$(find_mysql_base_dir $MYSQL)"
@@ -59,14 +61,25 @@ fi
if [ -z "$MYSQL_BASE_DIR" ]; then
   (
      cd $MYSQL_BIN_DIR
-     wget -q -O mysql.tar.gz http://hackmysql.com/barebones/mysql/$MYSQL/$ARCH
+     wget -q -O mysql.tar.gz http://hackmysql.com/barebones/$APP/$MYSQL/$ARCH \
+        || exit 1
      tar xvfz mysql.tar.gz
      rm mysql.tar.gz
   )
   MYSQL_BASE_DIR="$(find_mysql_base_dir $MYSQL)"
fi
-if [ -z "$("$MYSQL_BASE_DIR/bin/mysqld" -V)" ]; then
+if [ $APP = "mysql" ]; then
+   mysqld_check="$("$MYSQL_BASE_DIR/bin/mysqld" -V)"
+elif [ $APP = "pxc" ]; then
+   ip="$(perl -MNet::Address::IP::Local -le 'print Net::Address::IP::Local->public')"
+   mysqld_check="$("$MYSQL_BASE_DIR/bin/mysqld" -V --bind-address $ip)"
+else
+   echo "Invalid FORK=$APP" >&2
+   exit 1
+fi
+if [ -z "$mysqld_check" ]; then
   echo "$MYSQL_BASE_DIR/bin/mysqld does not execute" >&2
   exit 1
fi
@@ -95,7 +108,14 @@ rm ~/.my* || true
sandbox/test-env checkconfig || exit 1
sandbox/test-env stop || exit 1
sandbox/test-env kill || exit 1
-sandbox/test-env start || exit 1
+if [ $APP = "mysql" ]; then
+   sandbox/test-env start || exit 1
+elif [ $APP = "pxc" ]; then
+   sandbox/test-env start cluster || exit 1
+else
+   echo "Invalid FORK=$APP" >&2
+   exit 1
+fi

#######################
# Set debug env vars. #
@@ -122,8 +142,6 @@ EXIT_STATUS=$(($? | 0))
#############
# Clean up. #
#############
-set +x
sandbox/test-env stop
-set -x
exit $EXIT_STATUS
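Taken together, the changes in this file key the whole run off a new FORK environment variable: it selects which barebones tarball is downloaded and whether test-env starts a plain sandbox or a cluster. A sketch of how the wrapper might be invoked (the script name and the MYSQL version string are assumptions; only FORK, MYSQL, and ARCH appear in the diff):

   # Default: plain MySQL sandbox.
   MYSQL=5.5 ./run-sandbox-tests.sh

   # Percona XtraDB Cluster: fetches a pxc-* barebones tarball and
   # starts the sandbox with "sandbox/test-env start cluster".
   FORK=pxc MYSQL=5.5 ./run-sandbox-tests.sh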

View File

@@ -50,7 +50,6 @@ make_sandbox() {
if [ "${type}" = "cluster" ]; then if [ "${type}" = "cluster" ]; then
cp $PERCONA_TOOLKIT_BRANCH/sandbox/servers/pxc/$version/my.sandbox.cnf /tmp/$port cp $PERCONA_TOOLKIT_BRANCH/sandbox/servers/pxc/$version/my.sandbox.cnf /tmp/$port
local ip="$(perl -MNet::Address::IP::Local -le 'print Net::Address::IP::Local->public')"
local libgalera="$PERCONA_TOOLKIT_SANDBOX/lib/libgalera_smm.so" local libgalera="$PERCONA_TOOLKIT_SANDBOX/lib/libgalera_smm.so"
local cluster_name="${CLUSTER_NAME:-"pt_sandbox_cluster"}" local cluster_name="${CLUSTER_NAME:-"pt_sandbox_cluster"}"
local cluster_address="gcomm://" local cluster_address="gcomm://"
@@ -143,7 +142,6 @@ make_sandbox() {
/tmp/$port/use -e "CREATE DATABASE IF NOT EXISTS percona_test"; /tmp/$port/use -e "CREATE DATABASE IF NOT EXISTS percona_test";
/tmp/$port/use -e "CREATE TABLE IF NOT EXISTS percona_test.sentinel (id INT PRIMARY KEY, ping VARCHAR(64) NOT NULL DEFAULT '')"; /tmp/$port/use -e "CREATE TABLE IF NOT EXISTS percona_test.sentinel (id INT PRIMARY KEY, ping VARCHAR(64) NOT NULL DEFAULT '')";
/tmp/$port/use -e "REPLACE INTO percona_test.sentinel (id, ping) VALUES (1, '')";
if [ -n "${MYSQL_UPGRADE:-""}" ]; then if [ -n "${MYSQL_UPGRADE:-""}" ]; then
mysql_upgrade_on /tmp/$port/my.sandbox.cnf mysql_upgrade_on /tmp/$port/my.sandbox.cnf
@@ -225,7 +223,16 @@ elif [ -x "$PERCONA_TOOLKIT_SANDBOX/libexec/mysqld" ]; then
else
   die "Cannot find executable mysqld in $PERCONA_TOOLKIT_SANDBOX/bin, $PERCONA_TOOLKIT_SANDBOX/sbin or $PERCONA_TOOLKIT_SANDBOX/libexec."
fi
-version=`$PERCONA_TOOLKIT_SANDBOX/$mysqld -V 2>/dev/null | awk '{print $3}' | cut -d. -f 1,2`;
+APP="${FORK:-"mysql"}"
+if [ $type = "cluster" -o $APP = "pxc" ]; then
+   ip=$(perl -MNet::Address::IP::Local -le 'print Net::Address::IP::Local->public')
+   version=`$PERCONA_TOOLKIT_SANDBOX/$mysqld -V --bind-address $ip 2>/dev/null | awk '{print $3}' | cut -d. -f 1,2`;
+else
+   version=`$PERCONA_TOOLKIT_SANDBOX/$mysqld -V 2>/dev/null | awk '{print $3}' | cut -d. -f 1,2`;
+fi
if [ ! -d "$PERCONA_TOOLKIT_BRANCH/sandbox/servers/$version" ]; then
   die "$PERCONA_TOOLKIT_BRANCH/sandbox/servers/$version does not exist."
fi

View File

@@ -334,10 +334,10 @@ case $opt in
../util/check-load-data
ping=$(/tmp/12345/use -ss -e "SELECT MD5(RAND())")
-/tmp/12345/use -e "UPDATE percona_test.sentinel SET ping='$ping' WHERE id=1";
+/tmp/12345/use -e "REPLACE INTO percona_test.sentinel (id, ping) VALUES (1, '$ping')";
echo -n "Waiting for replication to finish..."
for i in $(_seq 60); do
-   pong=$(/tmp/12347/use -ss -e 'SELECT ping FROM percona_test.sentinel WHERE id=1' 2>/dev/null)
+   pong=$(/tmp/12347/use -ss -e "SELECT ping FROM percona_test.sentinel WHERE id=1 AND ping='$ping'" 2>/dev/null)
   [ "$ping" = "$pong" ] && break
   echo -n '.'
   sleep 1
@@ -379,8 +379,9 @@ case $opt in
done
;;
restart)
-   $0 stop
-   $0 start
+   shift;
+   $0 stop "$@"
+   $0 start "$@"
   ;;
status)
   sandbox_status 'master' '12345'
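The restart change above stops dropping arguments: after the shift, whatever followed "restart" is forwarded to both the stop and start sub-commands. Assuming this is the sandbox/test-env script invoked elsewhere in this commit, a cluster restart would now look like:

   # "cluster" is passed through to stop and start instead of being ignored.
   sandbox/test-env restart cluster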

View File

@@ -115,11 +115,13 @@ is(
"Sanity check: All nodes are in the heartbeat table" "Sanity check: All nodes are in the heartbeat table"
); );
# These values may be 0 or '' depending on whether or not a previous test
# turned 12345 into a slave or not. For this purpose 0 == undef == ''.
my $only_slave_data = { my $only_slave_data = {
map { map {
$_ => { $_ => {
relay_master_log_file => $rows->{$_}->{relay_master_log_file}, relay_master_log_file => $rows->{$_}->{relay_master_log_file} || undef,
exec_master_log_pos => $rows->{$_}->{exec_master_log_pos}, exec_master_log_pos => $rows->{$_}->{exec_master_log_pos} || undef,
} } keys %$rows } } keys %$rows
}; };
@@ -132,7 +134,7 @@ is_deeply(
      12347 => $same_data,
   },
   "Sanity check: No slave data (relay log or master pos) is stored"
-);
+) or diag(Dumper($rows));
$output = output(sub{
   pt_heartbeat::main($node1_dsn, qw(-D test --check)),
@@ -235,11 +237,11 @@ my ($slave_dbh, $slave_dsn) = $sb->start_sandbox(
   server => 'cslave1',
   type => 'slave',
   master => 'node1',
-   env => q/BINLOG_FORMAT="ROW"/,
+   env => q/FORK="pxc" BINLOG_FORMAT="ROW"/,
);
$sb->create_dbs($slave_dbh, ['test']);
+$sb->wait_for_slaves(master => 'node1', slave => 'cslave1');
start_update_instance($sb->port_for('cslave1'));
PerconaTest::wait_for_table($slave_dbh, "test.heartbeat", "1=1");
@@ -292,31 +294,37 @@ like(
my ($master_dbh, $master_dsn) = $sb->start_sandbox(
   server => 'cmaster',
   type => 'master',
-   env => q/BINLOG_FORMAT="ROW"/,
+   env => q/FORK="pxc" BINLOG_FORMAT="ROW"/,
);
my $cmaster_port = $sb->port_for('cmaster');
$sb->create_dbs($master_dbh, ['test']);
+$master_dbh->do("INSERT INTO percona_test.sentinel (id, ping) VALUES (1, '')");
$master_dbh->do("FLUSH LOGS");
$master_dbh->do("RESET MASTER");
$sb->set_as_slave('node1', 'cmaster');
+$sb->wait_for_slaves(master => 'cmaster', slave => 'node1');
start_update_instance($sb->port_for('cmaster'));
PerconaTest::wait_for_table($node1, "test.heartbeat", "server_id=$cmaster_port");
-$output = output(sub{
-   pt_heartbeat::main($node1_dsn, qw(-D test --check --print-master-server-id)),
-},
+# Auto-detecting the master id only works when run on node1, the direct
+# slave of the master, because other nodes aren't slaves, but this could
+# be made to work; see the node autodiscovery branch.
+$output = output(
+   sub {
+      pt_heartbeat::main($node1_dsn,
+         qw(-D test --check --print-master-server-id)
+      )},
   stderr => 1,
);
like(
   $output,
   qr/^\d.\d{2} $cmaster_port$/,
-   "--print-master-id works for master -> $node1_port, when run from $node1_port"
+   "Auto-detect master ID from node1"
);
# Wait until node2 & node3 get cmaster in their heartbeat tables
@@ -324,38 +332,39 @@ $sb->wait_for_slaves(master => 'node1', slave => 'node2');
$sb->wait_for_slaves(master => 'node1', slave => 'node3');
foreach my $test (
-   [ $node2_port, $node2_dsn, $node2 ],
-   [ $node3_port, $node3_dsn, $node3 ],
+   [ $node2_port, $node2_dsn, $node2, 'node2' ],
+   [ $node3_port, $node3_dsn, $node3, 'node3' ],
) {
-   my ($port, $dsn, $dbh) = @$test;
+   my ($port, $dsn, $dbh, $name) = @$test;
-   $output = output(sub{
-      pt_heartbeat::main($dsn, qw(-D test --check --print-master-server-id)),
-   },
+   $output = output(
+      sub {
+         pt_heartbeat::main($dsn,
+            qw(-D test --check --print-master-server-id)
+         )},
      stderr => 1,
   );
+   # This could be made to work, see the node autodiscovery branch
+   TODO: {
+      local $::TODO = "cmaster -> node1, other nodes can't autodetect the master";
   like(
      $output,
-      qr/$cmaster_port/,
-      "--print-master-id works for master -> $node1_port, when run from $port"
+      qr/server's master could not be automatically determined/,
+      "Limitation: cannot auto-detect master id from $name"
   );
+   }
-   $output = output(sub{
-      pt_heartbeat::main($dsn, qw(-D test --check --master-server-id), $cmaster_port),
-   },
+   $output = output(
+      sub {
+         pt_heartbeat::main($dsn,
+            qw(-D test --check --master-server-id), $cmaster_port
+         )},
      stderr => 1,
   );
   $output =~ s/\d\.\d{2}/0.00/g;
   is(
      $output,
      "0.00\n",
-      "--check + explicit --master-server-id work for master -> node1, run from $port"
+      "$name --check --master-server-id $cmaster_port"
   );
}
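A quick sketch of the two modes this loop exercises (host, port, and credentials are placeholders; 12346 is node2's port per the DSN table elsewhere in this diff): --print-master-server-id asks the replica to discover its master's server_id on its own, which the TODO block above records as impossible from node2/node3, while --master-server-id supplies it explicitly.

   # Auto-detection; on a non-slave cluster node this is expected to fail.
   pt-heartbeat h=127.1,P=12346,u=msandbox,p=msandbox -D test --check --print-master-server-id

   # Explicit master server_id (12349 is a placeholder for the cmaster port).
   pt-heartbeat h=127.1,P=12346,u=msandbox,p=msandbox -D test --check --master-server-id 12349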

View File

@@ -119,7 +119,7 @@ ok(
   $exit,
   "wsrep_OSU_method=RSU: non-zero exit"
) or diag($output);
-print $output;
like(
   $output,
   qr/wsrep_OSU_method=TOI is required.+?currently set to RSU/,
@@ -133,6 +133,56 @@ is_deeply(
"Restored wsrep_OSU_method=TOI" "Restored wsrep_OSU_method=TOI"
) or BAIL_OUT("Failed to restore wsrep_OSU_method=TOI"); ) or BAIL_OUT("Failed to restore wsrep_OSU_method=TOI");
# #############################################################################
# master -> cluster, run on master on table with foreign keys.
# #############################################################################
# CAREFUL: The master and the cluster are different, so don't do stuff
# on the master that will conflict with stuff already done on the cluster.
# And since we're using RBR, we have to do a lot of stuff on the master
# again, manually, because REPLACE and INSERT IGNORE don't work in RBR
# like they do SBR.
my ($master_dbh, $master_dsn) = $sb->start_sandbox(
server => 'cmaster',
type => 'master',
env => q/FORK="pxc" BINLOG_FORMAT="ROW"/,
);
$sb->set_as_slave('node1', 'cmaster');
$sb->load_file('cmaster', "$sample/basic_with_fks.sql", undef, no_wait => 1);
$master_dbh->do("SET SESSION binlog_format=STATEMENT");
$master_dbh->do("REPLACE INTO percona_test.sentinel (id, ping) VALUES (1, '')");
$sb->wait_for_slaves(master => 'cmaster', slave => 'node1');
($output, $exit) = full_output(
sub { pt_online_schema_change::main(
"$master_dsn,D=pt_osc,t=city",
qw(--print --execute --alter-foreign-keys-method drop_swap),
'--alter', 'DROP COLUMN last_update'
)},
stderr => 1,
);
my $rows = $node1->selectrow_hashref("SHOW SLAVE STATUS");
is(
$rows->{last_error},
"",
"Alter table with foreign keys on master replicating to cluster"
) or diag(Dumper($rows), $output);
is(
$exit,
0,
"... exit 0"
) or diag($output);
$sb->stop_sandbox(qw(cmaster));
$node1->do("STOP SLAVE");
$node1->do("RESET SLAVE");
# ############################################################################# # #############################################################################
# Done. # Done.
# ############################################################################# # #############################################################################
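The new test drives the tool through pt_online_schema_change::main; for reference, the roughly equivalent command line would be the following (the connection values stand in for $master_dsn and are placeholders):

   pt-online-schema-change --print --execute \
      --alter-foreign-keys-method drop_swap \
      --alter "DROP COLUMN last_update" \
      h=127.1,P=12349,u=msandbox,p=msandbox,D=pt_osc,t=city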

View File

@@ -183,7 +183,7 @@ my ($slave_dbh, $slave_dsn) = $sb->start_sandbox(
   server => 'cslave1',
   type => 'slave',
   master => 'node1',
-   env => q/BINLOG_FORMAT="ROW"/,
+   env => q/FORK="pxc" BINLOG_FORMAT="ROW"/,
);
# Add the slave to the DSN table.
@@ -214,7 +214,7 @@ $output = output(
like(
   $output,
-   qr/replica h=127.1,P=12348 has binlog_format ROW/,
+   qr/replica h=127.1,P=12348 has binlog_format ROW/i,
   "--check-binlog-format warns about slave's binlog format"
);
@@ -251,7 +251,7 @@ $sb->stop_sandbox('cslave1');
   server => 'cslave1',
   type => 'slave',
   master => 'node2',
-   env => q/BINLOG_FORMAT="ROW"/,
+   env => q/FORK="pxc" BINLOG_FORMAT="ROW"/,
);
# Wait for the slave to apply the binlogs from node2 (its master).
@@ -291,28 +291,46 @@ $node1->do(qq/DELETE FROM dsns.dsns WHERE id=4/);
# master -> node1 in cluster, run on master
# #############################################################################
+# CAREFUL: The master and the cluster are different, so don't do stuff
+# on the master that will conflict with stuff already done on the cluster.
+# And since we're using RBR, we have to do a lot of stuff on the master
+# again, manually, because REPLACE and INSERT IGNORE don't work in RBR
+# like they do in SBR.
my ($master_dbh, $master_dsn) = $sb->start_sandbox(
   server => 'cmaster',
   type => 'master',
-   env => q/BINLOG_FORMAT="ROW"/,
+   env => q/FORK="pxc" BINLOG_FORMAT="ROW"/,
);
-# CAREFUL: The master and the cluster are different, so we must load dbs on
-# the master then flush the logs, else node1 will apply the master's binlogs
-# and blow up because it already had these dbs.
-# Remember: this DSN table only has node2 and node3 (12346 and 12347) which is
-# sufficient for this test.
-$sb->load_file('cmaster', "$sample/dsn-table.sql");
+# Since master is new, node1 shouldn't have binlog to replay.
+$sb->set_as_slave('node1', 'cmaster');
# We have to load a-z-cluster.sql else the pk ids won't match because nodes use
# auto-inc offsets but the master doesn't.
-$sb->load_file('cmaster', "$sample/a-z-cluster.sql");
-$master_dbh->do("FLUSH LOGS");
-$master_dbh->do("RESET MASTER");
-$sb->set_as_slave('node1', 'cmaster');
+$sb->load_file('cmaster', "$sample/a-z-cluster.sql", undef, no_wait => 1);
+# Do this stuff manually and only on the master because node1/the cluster
+# already has it, and due to RBR, we can't do it other ways.
+$master_dbh->do("SET sql_log_bin=0");
+# This DSN table does not include 12345 (node1/slave) intentionally,
+# so a later test can auto-find 12345 then warn "Diffs will only be
+# detected if the cluster is consistent with h=127.1,P=12345...".
+$master_dbh->do("CREATE DATABASE dsns");
+$master_dbh->do("CREATE TABLE dsns.dsns (
+   id int auto_increment primary key,
+   parent_id int default null,
+   dsn varchar(255) not null
+)");
+$master_dbh->do("INSERT INTO dsns.dsns VALUES
+   (2, 1, 'h=127.1,P=12346,u=msandbox,p=msandbox'),
+   (3, 2, 'h=127.1,P=12347,u=msandbox,p=msandbox')");
+$master_dbh->do("INSERT INTO percona_test.sentinel (id, ping) VALUES (1, '')");
+$master_dbh->do("SET sql_log_bin=1");
+$sb->wait_for_slaves(master => 'cmaster', slave => 'node1');
# Notice: no --recursion-method=dsn yet. Since node1 is a traditional slave
# of the master, ptc should auto-detect it, which we'll test later by making

View File

@@ -8,9 +8,18 @@
set -x
tarball="$1"
-version=$(echo $tarball | awk -F'-' '{print $2}')
full_dir=${tarball%".tar.gz"}
+APP="${FORK:-"mysql"}"
+if [ $APP = "mysql" ]; then
+   version=$(echo $tarball | awk -F'-' '{print $2}')
+elif [ $APP = "pxc" ]; then
+   version=$(echo $tarball | awk -F'-' '{print $4}')
+else
+   echo "Invalid FORK=$APP" >&2
+   exit 1
+fi
tar xvfz "$tarball" \
   --wildcards \
   "$full_dir/COPYING" \
@@ -29,7 +38,10 @@ tar xvfz "$tarball" \
"$full_dir/bin/mysqldump" \ "$full_dir/bin/mysqldump" \
"$full_dir/bin/mysqld" \ "$full_dir/bin/mysqld" \
"$full_dir/bin/mysqld_safe" \ "$full_dir/bin/mysqld_safe" \
"$full_dir/bin/safe_mysqld" "$full_dir/bin/safe_mysqld" \
"$full_dir/lib/libgalera_smm.so" \
"$full_dir/bin/clustercheck" \
"$full_dir/bin/wsrep*"
echo "This tarball was created from $tarball. It contains only the files necessary for creating a Percona Toolkit sandbox test server." > $full_dir/README.barebones echo "This tarball was created from $tarball. It contains only the files necessary for creating a Percona Toolkit sandbox test server." > $full_dir/README.barebones
@@ -40,7 +52,7 @@ else
arch="i386" arch="i386"
fi fi
bare_dir="mysql-$version-$arch-barebones" bare_dir="$APP-$version-$arch-barebones"
mv $full_dir $bare_dir mv $full_dir $bare_dir
tar cvfz $bare_dir.tar.gz $bare_dir tar cvfz $bare_dir.tar.gz $bare_dir
rm -rf $bare_dir rm -rf $bare_dir
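A worked example of the version parsing added near the top of this file, with hypothetical tarball names (fields are split on '-', hence $2 for stock MySQL versus $4 for PXC):

   echo "mysql-5.5.30-linux2.6-x86_64.tar.gz"         | awk -F'-' '{print $2}'   # 5.5.30
   echo "Percona-XtraDB-Cluster-5.5.29-23.7.2.tar.gz" | awk -F'-' '{print $4}'   # 5.5.29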