Mirror of https://github.com/percona/percona-toolkit.git, synced 2025-09-01 18:25:59 +00:00
Remove trailing spaces (#665)
* Remove trailing spaces
* PR-665 - Remove trailing spaces
  - Updated not stable test t/pt-online-schema-change/preserve_triggers.t
  - Updated utilities in bin directory
* PR-665 - Remove trailing spaces
  - Fixed typos
* PR-665 - Remove trailing spaces
  - Fixed typos

---------

Co-authored-by: Sveta Smirnova <sveta.smirnova@percona.com>
@@ -7647,7 +7647,7 @@ type: string
Channel name used when connected to a server using replication channels.
Suppose you have two masters, master_a at port 12345, master_b at port 1236 and
a slave connected to both masters using channels chan_master_a and chan_master_b.
-If you want to run pt-archiver to syncronize the slave against master_a, pt-archiver
+If you want to run pt-archiver to synchronize the slave against master_a, pt-archiver
won't be able to determine what's the correct master since SHOW SLAVE STATUS
will return 2 rows. In this case, you can use --channel=chan_master_a to specify
the channel name to use in the SHOW SLAVE STATUS command.
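
To make the ambiguity concrete: with multi-source replication the slave reports one SHOW SLAVE STATUS row per channel, and --channel just names which row the tool should act on. A rough DBI sketch of that selection follows (hypothetical host, port and credentials; this is not code taken from pt-archiver):

    #!/usr/bin/env perl
    use strict;
    use warnings;
    use DBI;

    # Placeholder connection parameters for the two-channel slave described above.
    my $dbh = DBI->connect(
        'DBI:mysql:host=127.0.0.1;port=12347', 'msandbox', 'msandbox',
        { RaiseError => 1 },
    );

    # With multi-source replication, SHOW SLAVE STATUS returns one row per
    # channel, so a bare query is ambiguous. Filtering on Channel_Name is
    # what --channel does conceptually.
    my $rows = $dbh->selectall_arrayref('SHOW SLAVE STATUS', { Slice => {} });

    my ($status) = grep { ($_->{Channel_Name} // '') eq 'chan_master_a' } @$rows;
    printf "Replicating from %s:%d on channel %s\n",
        $status->{Master_Host}, $status->{Master_Port}, $status->{Channel_Name}
        if $status;
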
@@ -6100,7 +6100,7 @@ sub main {

# 2)
# RBR (Row Based Replication) converts REPLACE to INSERT if row isn't
-# present in master. This breakes replication when the row is present in slave.
+# present in master. This breaks replication when the row is present in slave.
# Other workarounds also fail.
# INSERT IGNORE (ignore is not replicated if no error in master)
# DELETE then INSERT (DELETE is ignored, INSERT breaks replication)
@@ -6646,7 +6646,7 @@ sub main {
TableParser => $tp,
Schema => $schema,
);
-TALBE:
+TABLE:
while ( my $tbl = $schema_itr->next() ) {
eval {
my $ddl = $tbl->{ddl};
@@ -6843,7 +6843,7 @@ sub print_unused_indexes {
my ( %args ) = @_;
my @required_args = qw(unused drop Quoter);
foreach my $arg ( @required_args ) {
-die "I need a $arg arugment" unless $args{$arg};
+die "I need a $arg argument" unless $args{$arg};
}
my ($unused, $drop, $q) = @args{@required_args};

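
This hunk and the next several touch the same argument-validation idiom used throughout these subs. A standalone sketch of the pattern, with a made-up sub name and argument list rather than anything copied from the tool:

    use strict;
    use warnings;

    sub print_report {
        my ( %args ) = @_;

        # Fail fast and loudly if a caller forgot to pass something essential.
        my @required_args = qw(dbh db Quoter);
        foreach my $arg ( @required_args ) {
            die "I need a $arg argument" unless $args{$arg};
        }

        # Hash slice pulls the validated values out in the same order.
        my ( $dbh, $db, $q ) = @args{@required_args};

        # ... real work with $dbh, $db and $q would go here ...
        return;
    }

    print_report( dbh => 'fake-dbh', db => 'sakila', Quoter => 'fake-quoter' );
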
@@ -6892,7 +6892,7 @@ sub print_alter_drop_key {
my ( %args ) = @_;
my @required_args = qw(db_tbl idx Quoter);
foreach my $arg ( @required_args ) {
-die "I need a $arg arugment" unless $args{$arg};
+die "I need a $arg argument" unless $args{$arg};
}
my ($db_tbl, $idx, $q) = @args{@required_args};

@@ -6929,7 +6929,7 @@ sub create_save_results_database {
my ( %args ) = @_;
my @required_args = qw(dbh db Quoter);
foreach my $arg ( @required_args ) {
-die "I need a $arg arugment" unless $args{$arg};
+die "I need a $arg argument" unless $args{$arg};
}
my ($dbh, $db, $q) = @args{@required_args};
my $sql;
@@ -6964,7 +6964,7 @@ sub get_save_results_tables {
my ( %args ) = @_;
my @required_args = qw(OptionParser);
foreach my $arg ( @required_args ) {
-die "I need a $arg arugment" unless $args{$arg};
+die "I need a $arg argument" unless $args{$arg};
}
my ($o) = @args{@required_args};
my $file = $args{file} || __FILE__;
@@ -6988,7 +6988,7 @@ sub empty_save_results_tables {
my ( %args ) = @_;
my @required_args = qw(dbh db tbls Quoter);
foreach my $arg ( @required_args ) {
-die "I need a $arg arugment" unless $args{$arg};
+die "I need a $arg argument" unless $args{$arg};
}
my ($dbh, $db, $tbls, $q) = @args{@required_args};

@@ -7009,7 +7009,7 @@ sub create_save_results_tables {
my ( %args ) = @_;
my @required_args = qw(dbh db tbls);
foreach my $arg ( @required_args ) {
-die "I need a $arg arugment" unless $args{$arg};
+die "I need a $arg argument" unless $args{$arg};
}
my ($dbh, $db, $tbls) = @args{@required_args};

@@ -7026,7 +7026,7 @@ sub create_views {
my ( %args ) = @_;
my @required_args = qw(dbh);
foreach my $arg ( @required_args ) {
-die "I need a $arg arugment" unless $args{$arg};
+die "I need a $arg argument" unless $args{$arg};
}
my ($dbh) = @args{@required_args};
PTDEBUG && _d("Creating views");
@@ -8465,7 +8465,7 @@ $OUTPUT_AUTOFLUSH = 1;

use constant {
INVALID_PARAMETERS => 1,
-UNSUPORTED_MYSQL_VERSION => 2,
+UNSUPPORTED_MYSQL_VERSION => 2,
NO_MINIMUM_REQUIREMENTS => 3,
NO_PRIMARY_OR_UNIQUE_KEY => 4,
INVALID_PLUGIN_FILE => 5,
@@ -8480,7 +8480,7 @@ use constant {
ERROR_SWAPPING_TABLES => 14,
ERROR_UPDATING_FKS => 15,
ERROR_DROPPING_OLD_TABLE => 16,
-UNSUPORTED_OPERATION => 17,
+UNSUPPORTED_OPERATION => 17,
MYSQL_CONNECTION_ERROR => 18,
LOST_MYSQL_CONNECTION => 19,
ERROR_CREATING_REVERSE_TRIGGERS => 20,
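
These constants double as the tool's exit statuses (the EXIT STATUS documentation hunk further down lists the same values). A minimal sketch of how a message-plus-status helper can be wired to them; the _die() body below is an illustrative assumption, not the tool's actual implementation:

    use strict;
    use warnings;

    use constant {
        INVALID_PARAMETERS        => 1,
        UNSUPPORTED_MYSQL_VERSION => 2,
        UNSUPPORTED_OPERATION     => 17,
    };

    # Exit with a distinct status per failure mode so wrappers and monitoring
    # can tell why the tool stopped, not merely that it stopped.
    sub _die {
        my ( $msg, $exit_status ) = @_;
        $msg .= "\n" unless $msg =~ m/\n\z/;
        print STDERR $msg;
        exit( defined $exit_status ? $exit_status : 255 );
    }

    # Example call, mirroring the hunks below:
    # _die("This tool requires MySQL 5.0.10 or newer.", UNSUPPORTED_MYSQL_VERSION);
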
@@ -8724,7 +8724,7 @@ sub main {
. "this tool on a cluster, but node " . $cxn->name
. " is running version " . $pxc_version->version
. ". Please upgrade the node, or run the tool on a newer node, "
-. "or contact Percona for support.", UNSUPORTED_MYSQL_VERSION);
+. "or contact Percona for support.", UNSUPPORTED_MYSQL_VERSION);
}
if ( $pxc_version < '5.6' && $o->got('max-flow-ctl') ) {
_die("Option '--max-flow-ctl is only available for PXC version 5.6 "
@@ -8756,7 +8756,7 @@ sub main {
# ########################################################################
my $server_version = VersionParser->new($cxn->dbh());
if ( $server_version < '5.0.10' ) {
-_die("This tool requires MySQL 5.0.10 or newer.", UNSUPORTED_MYSQL_VERSION);
+_die("This tool requires MySQL 5.0.10 or newer.", UNSUPPORTED_MYSQL_VERSION);
}

# Use LOCK IN SHARE mode unless MySQL 5.0 because there's a bug like
@@ -8770,7 +8770,7 @@ sub main {
my $analyze_table = $o->get('analyze-before-swap');
if ( $o->got('analyze-before-swap') ) {
# User specified so respect their wish. If --analyze-before-swap, do it
-# regardless of MySQL version and innodb_stats_peristent.
+# regardless of MySQL version and innodb_stats_persistent.
# If --no-analyze-before-swap, don't do it.
PTDEBUG && _d('User specified explicit --analyze-before-swap:',
($analyze_table ? 'on' : 'off'));
@@ -8783,10 +8783,10 @@ sub main {
my (undef, $innodb_stats_persistent) = $cxn->dbh->selectrow_array(
"SHOW VARIABLES LIKE 'innodb_stats_persistent'");
if ($innodb_stats_persistent eq 'ON' || $innodb_stats_persistent eq '1') {
-PTDEBUG && _d('innodb_stats_peristent is ON, enabling --analyze-before-swap');
+PTDEBUG && _d('innodb_stats_persistent is ON, enabling --analyze-before-swap');
$analyze_table = 1;
} else {
-PTDEBUG && _d('innodb_stats_peristent is OFF, disabling --analyze-before-swap');
+PTDEBUG && _d('innodb_stats_persistent is OFF, disabling --analyze-before-swap');
$analyze_table = 0;
}
} else {
@@ -9975,7 +9975,7 @@ sub main {

# Adjust chunk size. This affects the next chunk.
if ( $chunk_time ) {
-# Calcuate a new chunk-size based on the rate of rows/s.
+# Calculate a new chunk-size based on the rate of rows/s.
$tbl->{chunk_size} = $tbl->{rate}->update(
$cnt, # processed this many rows
$tbl->{nibble_time}, # is this amount of time
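
The rate object used here resizes chunks so that each one takes roughly the target --chunk-time. A back-of-the-envelope sketch of that feedback loop; the smoothing weight and target time are assumptions for illustration, not the toolkit's actual rate code:

    use strict;
    use warnings;

    # Feedback loop: measure rows/s for the chunk just copied, smooth it,
    # and size the next chunk so it should take about $target_time seconds.
    my $target_time = 0.5;    # assumed --chunk-time of 0.5s
    my $avg_rate;             # smoothed rows per second

    sub next_chunk_size {
        my ( $rows, $seconds ) = @_;
        return unless $seconds > 0;
        my $rate  = $rows / $seconds;
        my $alpha = 0.75;     # assumed weight toward the most recent chunk
        $avg_rate = defined $avg_rate
                  ? $alpha * $rate + ( 1 - $alpha ) * $avg_rate
                  : $rate;
        return int( $avg_rate * $target_time ) || 1;
    }

    printf "next chunk: %d rows\n", next_chunk_size( 1000, 0.25 );  # ran fast, so grow
    printf "next chunk: %d rows\n", next_chunk_size( 2000, 1.00 );  # ran slow, so shrink
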
@@ -10457,14 +10457,14 @@ sub check_alter {
if ( ($tbl->{tbl_struct}->{engine} || '') =~ m/RocksDB/i ) {
if ($alter =~ m/FOREIGN KEY/i) {
my $msg = "FOREIGN KEYS are not supported by the RocksDB engine\n\n";
-_die($msg, UNSUPORTED_OPERATION);
+_die($msg, UNSUPPORTED_OPERATION);
}
}
if ( $alter =~ m/Engine\s*=\s*["']?RocksDB["']?/i ) {
my $row = $cxn->dbh()->selectrow_arrayref('SELECT @@binlog_format');
if (scalar $row > 0 && $row->[0] eq 'STATEMENT') {
_die("Cannot change engine to RocksDB while binlog_format is other than 'ROW'",
-UNSUPORTED_OPERATION);
+UNSUPPORTED_OPERATION);
}
}
# ########################################################################
@@ -10536,7 +10536,7 @@ sub check_alter {

if ( !$ok ) {
# check_alter.t relies on this output.
-_die("--check-alter failed.\n", UNSUPORTED_OPERATION);
+_die("--check-alter failed.\n", UNSUPPORTED_OPERATION);
}

return;
@@ -10569,7 +10569,7 @@ sub _has_self_ref_fks {
# [ 'C2', 'c3' ]
# ];
#
-# Thse fields are used to build an example SELECT to detect if currently there are
+# These fields are used to build an example SELECT to detect if currently there are
# rows that will produce duplicates when the new UNIQUE INDEX is created.

sub get_unique_index_fields {
@@ -10842,7 +10842,7 @@ sub create_new_table {
};
}

-die "Failed to find a unique new table name after $tries attemps. "
+die "Failed to find a unique new table name after $tries attempts. "
. "The following tables exist which may be left over from previous "
. "failed runs of the tool:\n"
. join("\n", map { " $_" } @old_tables)
@@ -10975,7 +10975,7 @@ sub swap_tables {

# This shouldn't happen.
die ts("Failed to find a unique old table name after "
-. "serveral attempts.\n");
+. "several attempts.\n");
}
}

@@ -11555,7 +11555,7 @@ sub random_suffix {
# orig_table.......: Original table name. Used to LOCK the table.
# In case we are creating a new temporary trigger for testing
# purposes or if --no-swap-tables is enabled, this param should
-# be omitted since we are creating a completelly new trigger so,
+# be omitted since we are creating a completely new trigger so,
# since in this case we are not going to DROP the old trigger,
# there is no need for a LOCK
#
@@ -12915,7 +12915,7 @@ type: DSN; repeatable: yes

DSN to skip when checking slave lag. It can be used multiple times.
Example: --skip-check-slave-lag h=127.0.0.1,P=12345 --skip-check-slave-lag h=127.0.0.1,P=12346
-Plase take into consideration that even when for the MySQL driver h=127.1 is equal to h=127.0.0.1,
+Please take into consideration that even when for the MySQL driver h=127.1 is equal to h=127.0.0.1,
for this parameter you need to specify the full IP address.

=item --slave-user
@@ -13374,7 +13374,7 @@ parameters are shown in the output.
=head1 EXIT STATUS

INVALID_PARAMETERS = 1
-UNSUPORTED_MYSQL_VERSION = 2
+UNSUPPORTED_MYSQL_VERSION = 2
NO_MINIMUM_REQUIREMENTS = 3
NO_PRIMARY_OR_UNIQUE_KEY = 4
INVALID_PLUGIN_FILE = 5
@@ -13389,7 +13389,7 @@ parameters are shown in the output.
ERROR_SWAPPING_TABLES = 14
ERROR_UPDATING_FKS = 15
ERROR_DROPPING_OLD_TABLE = 16
-UNSUPORTED_OPERATION = 17
+UNSUPPORTED_OPERATION = 17
MYSQL_CONNECTION_ERROR = 18
LOST_MYSQL_CONNECTION = 19
ERROR_CREATING_REVERSE_TRIGGERS = 20
@@ -13425,7 +13425,7 @@ sub main {
my %stats; # various stats/counters used in some procs

# The pipeline data hashref is passed to each proc. Procs use this to
-# pass data through the pipeline. The most importat data is the event.
+# pass data through the pipeline. The most important data is the event.
# Other data includes in the next_event callback, time and iters left,
# etc. This hashref is accessed inside a proc via the $args arg.
my $pipeline_data = {
@@ -13492,7 +13492,7 @@ sub main {
}
PTDEBUG && _d('Reading', $filename);
PTDEBUG && _d('File size:', $filesize);
-# catch if user is trying to use an uncoverted (raw) binlog # issue 1377888
+# catch if user is trying to use an unconverted (raw) binlog # issue 1377888
if ( $filename && $o->get('type')->[0] eq 'binlog') {
if (is_raw_binlog($filename)) {
warn "Binlog file $filename must first be converted to text format using mysqlbinlog";
@@ -2238,7 +2238,7 @@ sub split_grants {
(?:INSERT|SELECT|UPDATE)\s\(.+?\) # a column grants
| [A-Z\s]+
)
-(?:,\s)? # Separted from the next grant, if any, by a comma
+(?:,\s)? # Separated from the next grant, if any, by a comma
/xg;
# sort columns in column-level permissions (bug lp-1523730)
@grants = map {
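
For reference, the regex being fixed here splits a privilege list into individual grants, keeping a column-level grant such as INSERT (col1, col2) together as one unit. A simplified standalone reconstruction (the capture wrapper and the sample input are assumptions pieced together from the context lines, not the exact pt-show-grants code):

    use strict;
    use warnings;

    my $privs = 'SELECT, INSERT (customer_id, store_id), UPDATE (address_id), USAGE';

    my @grants = $privs =~ m/
        (                                     # capture one grant at a time
            (?:INSERT|SELECT|UPDATE)\s\(.+?\) # a column grant
            | [A-Z\s]+                        # or a plain privilege name
        )
        (?:,\s)?                              # separated from the next grant, if any, by a comma
    /xg;

    print "$_\n" for @grants;
    # SELECT
    # INSERT (customer_id, store_id)
    # UPDATE (address_id)
    # USAGE
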
@@ -571,7 +571,7 @@ main() {
BASEDIR="$(dirname "$1")"
PREFIX="$(echo "$1" | perl -ne '$_ =~ m/([\d_]+)/; print $1;')"
else
-echo "Error: $1 is not a directory, and there are no pt-stalk files in the curent working directory ($BASEDIR) with a $1 prefix." >&2
+echo "Error: $1 is not a directory, and there are no pt-stalk files in the current working directory ($BASEDIR) with a $1 prefix." >&2
echo "For more information, 'man pt-sift' or 'perldoc $0'." >&2
exit 1
fi
@@ -10135,7 +10135,7 @@ sub main {
# https://bugs.launchpad.net/percona-toolkit/+bug/919352
# The tool shouldn't blindly attempt to change binlog_format;
# instead, it should check if it's already set to STATEMENT.
-# This is becase starting with MySQL 5.1.29, changing the format
+# This is because starting with MySQL 5.1.29, changing the format
# requires a SUPER user.
if ( VersionParser->new($dbh) >= '5.1.5' ) {
$sql = 'SELECT @@binlog_format';
@@ -11362,7 +11362,7 @@ sub main {
# to each slave.
# MySQL 8+ replication is slower than 5.7 and the old wait_for_last_checksum alone
# was failing. The new wait_for_slaves checks that Read_Master_Log_Pos on slaves is
-# greather or equal Position in the master
+# greater or equal Position in the master
if (!$args{Cxn}->is_cluster_node()) {
wait_for_slaves(master_dbh => $args{Cxn}->dbh(), master_slave => $ms, slaves => $slaves);
}
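
The check described in this comment amounts to comparing the master's current binlog coordinates with what a slave's IO thread has already read. A rough DBI sketch of that comparison (placeholder connections, no polling or timeout handling; this is not the tool's wait_for_slaves() implementation):

    use strict;
    use warnings;
    use DBI;

    # Placeholders: in practice these come from the tool's Cxn objects.
    my $master_dbh = DBI->connect('DBI:mysql:host=127.0.0.1;port=12345',
                                  'msandbox', 'msandbox', { RaiseError => 1 });
    my $slave_dbh  = DBI->connect('DBI:mysql:host=127.0.0.1;port=12346',
                                  'msandbox', 'msandbox', { RaiseError => 1 });

    # Where the master currently is ...
    my $master = $master_dbh->selectrow_hashref('SHOW MASTER STATUS');

    # ... versus how far the slave's IO thread has read.
    my $slave = $slave_dbh->selectrow_hashref('SHOW SLAVE STATUS');

    my $caught_up =
           $slave->{Master_Log_File} eq $master->{File}
        && $slave->{Read_Master_Log_Pos} >= $master->{Position};

    print $caught_up ? "slave has read up to the master's position\n"
                     : "slave is still behind, keep waiting\n";

A real check also has to cope with the slave still reading an older binlog file, and would poll until the condition holds or a timeout expires.
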
@@ -12885,8 +12885,8 @@ of the tool; they're only reminders to help avoid false-positive results.

=item RocksDB support

-Due to the limitations in the RocksDB engine like not suporting binlog_format=STATEMENT
-or they way RocksDB handles Gap locks, pt-table-cheksum will skip tables using RocksDB engine.
+Due to the limitations in the RocksDB engine like not supporting binlog_format=STATEMENT
+or they way RocksDB handles Gap locks, pt-table-checksum will skip tables using RocksDB engine.
More Information: (L<https://www.percona.com/doc/percona-server/LATEST/myrocks/limitations.html>)

=back
@@ -13800,7 +13800,7 @@ Socket file to use for connection.

type: float; default: 1.0

-When a master table is marked to be checksumed in only one chunk but a slave
+When a master table is marked to be checksummed in only one chunk but a slave
table exceeds the maximum accepted size for this, the table is skipped.
Since number of rows are often rough estimates, many times tables are skipped
needlessly for very small differences.
@@ -13832,7 +13832,7 @@ remove them. These differences will cause false checksum differences.

Truncate the replicate table before starting the checksum.
This parameter differs from L<--empty-replicate-table> which only deletes the rows
-for the table being checksumed when starting the checksum for that table, while
+for the table being checksummed when starting the checksum for that table, while
L<--truncate-replicate-table> will truncate the replicate table at the beginning of the
process and thus, all previous checksum information will be losti, even if the process
stops due to an error.
@@ -10126,7 +10126,7 @@ sub main {

# Create callbacks for bidirectional syncing. Currently, this only
# works with TableSyncChunk, so that should be the only plugin because
-# --algorithms was overriden earlier.
+# --algorithms was overridden earlier.
if ( $o->get('bidirectional') ) {
set_bidirectional_callbacks(
plugin => $plugins->[0],
@@ -11146,7 +11146,7 @@ sub get_cxn {
# https://bugs.launchpad.net/percona-toolkit/+bug/919352
# The tool shouldn't blindly attempt to change binlog_format;
# instead, it should check if it's already set to STATEMENT.
-# This is becase starting with MySQL 5.1.29, changing the format
+# This is because starting with MySQL 5.1.29, changing the format
# requires a SUPER user.
if ( VersionParser->new($dbh) >= '5.1.29'
&& ($o->get('replicate') || $o->get('sync-to-master'))) {
@@ -11538,7 +11538,7 @@ sub set_bidirectional_callbacks {
}
}
elsif ( $res == FAILED_THRESHOLD ) {
-$err = "`$col` values do not differ by the threhold, $thr."
+$err = "`$col` values do not differ by the threshold, $thr."
}
else {
# Shouldn't happen.
@@ -11577,7 +11577,7 @@ sub set_bidirectional_callbacks {
# Get internal TableSync* plugins.
#
# Returns:
-# Hash of available algoritms and the plugin/module names that
+# Hash of available algorithms and the plugin/module names that
# implement them, like "chunk => TableSyncChunk".
sub get_plugins {
my ( %args ) = @_;
@@ -4144,7 +4144,7 @@ sub parse_insert {
(?:INTO\s+)? # INTO, optional
(.+?)\s+ # table ref
(\([^\)]+\)\s+)? # column list, optional
-(VALUE.?|SET|SELECT)\s+ # start of next caluse
+(VALUE.?|SET|SELECT)\s+ # start of next clause
/xgci)
) {
my $tbl = shift @into; # table ref
@@ -4325,7 +4325,7 @@ sub parse_table_reference {

if ( $tbl_ref =~ s/
\s+(
-(?:FORCE|USE|INGORE)\s
+(?:FORCE|USE|IGNORE)\s
(?:INDEX|KEY)
\s*\([^\)]+\)\s*
)//xi)
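
As a quick illustration of what this substitution does once INGORE is corrected to IGNORE, here it is applied to a hypothetical table reference (the table and index names are invented, and the handling around the match is simplified compared to the real parser):

    use strict;
    use warnings;

    my $tbl_ref = 'film_actor USE INDEX (idx_fk_film_id)';

    # Strip an index hint (FORCE/USE/IGNORE INDEX|KEY (...)) off the table
    # reference, keeping the hint itself in $1.
    if ( $tbl_ref =~ s/
            \s+(
                (?:FORCE|USE|IGNORE)\s
                (?:INDEX|KEY)
                \s*\([^\)]+\)\s*
            )//xi )
    {
        print "index hint : $1\n";        # USE INDEX (idx_fk_film_id)
        print "table ref  : $tbl_ref\n";  # film_actor
    }
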
@@ -7725,7 +7725,7 @@ sub main {
return; # 1. exit pipeline early
}

-# There's input and time left so keep runnning...
+# There's input and time left so keep running...
if ( $args->{event} ) {
PTDEBUG && _d("Event in pipeline, continuing");
return $args;
@@ -10770,7 +10770,7 @@ steps, one of which is ensuring that queries will produce identical results
on the new version of MySQL.

pt-upgrade executes queries from slow, general, binary, tcpdump, and
-"raw" logs on two servers, compares many aspects of each query's exeuction
+"raw" logs on two servers, compares many aspects of each query's execution
and results, and reports any significant differences. The two servers are
typically development servers, one running the current production version
of MySQL and the other running the new version of MySQL.
@@ -11008,7 +11008,7 @@ or query errors has L<"--max-examples">. Else, all queries with differences
are reported when the tool finishes.

For example, if two query time differences are found for a query class,
-it is not reported yet. Once a third query time diffence is found,
+it is not reported yet. Once a third query time difference is found,
the query class is reported, including any other differences that may
have been found too. Queries for the class will continue to be executed,
but the class will not be reported again.
@@ -447,7 +447,7 @@ SKIP: {

diag("Reloading sakila");
my $master_port = $sb->port_for('master');
-system "$trunk/sandbox/load-sakila-db $master_port &";
+system "$trunk/sandbox/load-sakila-db $master_port";

if ($sandbox_version ge '8.0') {
$sb->do_as_root("master", q/CREATE USER 'slave_user'@'%' IDENTIFIED WITH mysql_native_password BY 'slave_password'/);