mirror of
https://gitee.com/dromara/dbswitch.git
synced 2025-08-29 00:44:19 +00:00
v1.6.15:配置任务执行的线程池
This commit is contained in:
@@ -2,13 +2,22 @@
|
||||
|
||||
## 一、操作流程
|
||||
|
||||

|
||||
|
||||
## 二、代码格式
|
||||
|
||||
dbswitch使用java编码开发,这里以IDEA为例,指定使用google的代码风格,可将项目下的intellij-java-google-style.xml文件导入到IDEA中。
|
||||

|
||||
|
||||

|
||||
## 二、开发规范
|
||||
|
||||
### 1、编码规范
|
||||
|
||||
请遵循阿里巴巴编码规范(Java),IDEA推荐直接安装`Alibaba Java Coding Guidelines`
|
||||
插件: https://plugins.jetbrains.com/plugin/10046-alibaba-java-coding-guidelines
|
||||
|
||||
### 2、代码格式
|
||||
|
||||
dbswitch使用java编码开发,这里以IDEA为例,指定使用google的代码风格,可将项目下的intellij-java-google-style.xml文件导入到IDEA中,也可到如下地址进行下载:
|
||||
|
||||
https://github.com/google/styleguide/blob/gh-pages/intellij-java-google-style.xml
|
||||
|
||||

|
||||
|
||||
## 二、操作步骤
|
||||
|
||||
@@ -16,22 +25,31 @@ dbswitch使用java编码开发,这里以IDEA为例,指定使用google的代
|
||||
|
||||
首先使用gitee账号(如果没有请先注册)登录后,然后进入地址```https://gitee.com/inrgihc/dbswitch```中,点击```fork```按钮,在自己账号下复制一份dbswitch项目,如下图:
|
||||
|
||||

|
||||

|
||||
|
||||
### 2、Clone 自己的项目
|
||||
|
||||
- (1) 拉取远端的代码到本地
|
||||
|
||||
```
|
||||
git clone -b dev https://gitee.com/tangyibo/dbswitch.git
|
||||
git clone -b master https://gitee.com/tangyibo/dbswitch.git
|
||||
cd dbswitch/
|
||||
git checkout -b dev
|
||||
```
|
||||
|
||||
**注:请替换上述中的tangyibo为您gitee的实际账号信息**
|
||||
|
||||
- (2) 请正确配置自己的信息(此信息会包含在后面的commit提交信息中)
|
||||
|
||||
```
|
||||
git config --local user.name xxxx
|
||||
git config --local user.email xxxx@126.com
|
||||
```
|
||||
|
||||
**注:请替换上述中的xxxx为实际的信息**
|
||||
|
||||
- (3) 配置远端dbswitch项目的名称(这里假设为upstream)
|
||||
|
||||
```
|
||||
git remote add upstream https://gitee.com/inrgihc/dbswitch.git
|
||||
```
|
||||
@@ -41,16 +59,19 @@ git remote add upstream https://gitee.com/inrgihc/dbswitch.git
|
||||
- (1) 在本地向自己的分支中提交代码:
|
||||
|
||||
- (2) fetch 远端dbswitch的最新代码
|
||||
|
||||
```
|
||||
git fetch upstream master
|
||||
```
|
||||
|
||||
- (3) rebase 远端dbswitch的最新代码(处理可能存在的冲突)
|
||||
|
||||
```
|
||||
git rebase upstream/master
|
||||
```
|
||||
|
||||
- (4) push 本地代码到远端
|
||||
|
||||
```
|
||||
git push
|
||||
```
|
||||
@@ -63,10 +84,10 @@ git push
|
||||
|
||||
在自己的分支上创建pull requests:
|
||||
|
||||

|
||||

|
||||
|
||||
选择对应的分支:
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
填写好“标题”和“描述”,点击“创建Pull Request”即可完成代码提交合入申请操作,审核通过后您成为dbswitch项目贡献者。
|
||||
|
@@ -2,7 +2,7 @@
|
||||
|
||||
set -e
|
||||
|
||||
DBSWITCH_VERSION=1.6.15
|
||||
DBSWITCH_VERSION=1.6.16
|
||||
BUILD_DOCKER_DIR="$( cd "$( dirname "$0" )" && pwd )"
|
||||
PROJECT_ROOT_DIR=$( dirname "$BUILD_DOCKER_DIR")
|
||||
DOCKER_DBSWITCH_DIR=$BUILD_DOCKER_DIR/dbswitch
|
||||
|
@@ -13,7 +13,7 @@ services:
|
||||
MYSQL_ROOT_HOST: '%'
|
||||
dbswitch:
|
||||
container_name: dbswitch_webui
|
||||
image: inrgihc/dbswitch:1.6.15
|
||||
image: inrgihc/dbswitch:1.6.16
|
||||
environment:
|
||||
MYSQLDB_HOST: dbswitch_mysqldb
|
||||
MYSQLDB_PORT: 3306
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-admin</artifactId>
|
||||
|
@@ -13,7 +13,7 @@ import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.web.servlet.config.annotation.CorsRegistry;
|
||||
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
|
||||
|
||||
@Configuration
|
||||
@Configuration("dbswitchCorsConfig")
|
||||
public class CorsConfig implements WebMvcConfigurer {
|
||||
|
||||
@Override
|
||||
|
@@ -0,0 +1,35 @@
|
||||
package com.gitee.dbswitch.admin.config;
|
||||
|
||||
import com.gitee.dbswitch.data.util.DataSourceUtils;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.core.task.AsyncTaskExecutor;
|
||||
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
|
||||
|
||||
@Configuration("dbswitchExecutorConfig")
|
||||
public class ExecutorConfig {
|
||||
|
||||
public final static String TASK_EXECUTOR_BEAN_NAME = "migrationTaskExecutor";
|
||||
|
||||
/**
|
||||
* 创建一个异步任务执行ThreadPoolTaskExecutor
|
||||
*
|
||||
* @return ThreadPoolTaskExecutor
|
||||
*/
|
||||
@Bean(TASK_EXECUTOR_BEAN_NAME)
|
||||
public AsyncTaskExecutor createTableMigrationTaskExecutor() {
|
||||
ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
|
||||
taskExecutor.setCorePoolSize(DataSourceUtils.MAX_THREAD_COUNT);
|
||||
taskExecutor.setMaxPoolSize(DataSourceUtils.MAX_THREAD_COUNT);
|
||||
taskExecutor.setQueueCapacity(10000);
|
||||
taskExecutor.setKeepAliveSeconds(1800);
|
||||
taskExecutor.setDaemon(true);
|
||||
taskExecutor.setThreadGroupName("dbswitch");
|
||||
taskExecutor.setThreadNamePrefix("dbswitch-migration-");
|
||||
taskExecutor.setBeanName(TASK_EXECUTOR_BEAN_NAME);
|
||||
taskExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
|
||||
taskExecutor.initialize();
|
||||
return taskExecutor;
|
||||
}
|
||||
}
|
@@ -29,7 +29,7 @@ import org.springframework.web.servlet.HandlerInterceptor;
|
||||
import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
|
||||
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
|
||||
|
||||
@Configuration
|
||||
@Configuration("dbswitchInterceptorConfig")
|
||||
public class InterceptorConfig implements WebMvcConfigurer {
|
||||
|
||||
@Override
|
||||
|
@@ -25,7 +25,7 @@ import org.springframework.scheduling.quartz.SchedulerFactoryBean;
|
||||
import org.springframework.scheduling.quartz.SpringBeanJobFactory;
|
||||
|
||||
@Slf4j
|
||||
@Configuration
|
||||
@Configuration("dbswitchQuartzConfig")
|
||||
public class QuartzConfig {
|
||||
|
||||
@Bean
|
||||
@@ -63,7 +63,6 @@ public class QuartzConfig {
|
||||
// Configure JobStore 作业存储配置
|
||||
/////////////////////////////////////
|
||||
|
||||
prop.put("org.quartz.jobStore.misfireThreshold", "60000");
|
||||
// 数据库方式 JobStore配置
|
||||
prop.put("org.quartz.jobStore.class", "org.quartz.impl.jdbcjobstore.JobStoreTX");
|
||||
//持久化方式配置数据驱动
|
||||
|
@@ -26,7 +26,7 @@ import springfox.documentation.spi.DocumentationType;
|
||||
import springfox.documentation.spring.web.plugins.Docket;
|
||||
import springfox.documentation.swagger2.annotations.EnableSwagger2;
|
||||
|
||||
@Configuration
|
||||
@Configuration("dbswitchSwaggerConfig")
|
||||
@EnableSwagger2
|
||||
public class SwaggerConfig {
|
||||
|
||||
|
@@ -14,7 +14,7 @@ import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry
|
||||
import org.springframework.web.servlet.config.annotation.ViewControllerRegistry;
|
||||
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
|
||||
|
||||
@Configuration
|
||||
@Configuration("dbswitchWebMvcConfig")
|
||||
public class WebMvcConfig implements WebMvcConfigurer {
|
||||
|
||||
@Override
|
||||
|
@@ -36,12 +36,15 @@ import org.quartz.JobExecutionException;
|
||||
import org.quartz.JobKey;
|
||||
import org.quartz.PersistJobDataAfterExecution;
|
||||
import org.quartz.UnableToInterruptJobException;
|
||||
import org.springframework.core.task.AsyncTaskExecutor;
|
||||
import org.springframework.scheduling.quartz.QuartzJobBean;
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* 如果你使用了@PersistJobDataAfterExecution注解,则强烈建议你同时使用@DisallowConcurrentExecution注 解,因为当同一个job(JobDetail)的两个实例被并发执行时,由于竞争,JobDataMap中存储的数据很可能是不确定的。
|
||||
* </p>
|
||||
* 如果你使用了@PersistJobDataAfterExecution注解,则强烈建议你同时使用@DisallowConcurrentExecution注解,
|
||||
* <p>
|
||||
* 因为当同一个job(JobDetail)的两个实例被并发执行时,由于竞争,JobDataMap中存储的数据很可能是不确定的。
|
||||
* <p>
|
||||
*/
|
||||
@Slf4j
|
||||
@PersistJobDataAfterExecution
|
||||
@@ -84,6 +87,9 @@ public class JobExecutorService extends QuartzJobBean implements InterruptableJo
|
||||
@Resource
|
||||
private AssignmentJobDAO assignmentJobDAO;
|
||||
|
||||
@Resource
|
||||
private AsyncTaskExecutor migrationTaskExecutor;
|
||||
|
||||
/**
|
||||
* 实现setter方法,Quartz会给成员变量taskId注入值
|
||||
*
|
||||
@@ -146,7 +152,7 @@ public class JobExecutorService extends QuartzJobBean implements InterruptableJo
|
||||
properties.getTarget().setChangeDataSync(true);
|
||||
}
|
||||
|
||||
MigrationService mainService = new MigrationService(properties);
|
||||
MigrationService mainService = new MigrationService(properties, migrationTaskExecutor);
|
||||
if (interrupted) {
|
||||
log.info("Quartz task id:{} interrupted when prepare stage", jobDataMap.getLong(TASK_ID));
|
||||
return;
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-common</artifactId>
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-core</artifactId>
|
||||
|
@@ -23,7 +23,7 @@ import java.sql.ResultSetMetaData;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
@@ -56,7 +56,7 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
|
||||
|
||||
@Override
|
||||
public List<String> querySchemaList(Connection connection) {
|
||||
Set<String> ret = new HashSet<>();
|
||||
Set<String> ret = new LinkedHashSet<>();
|
||||
try (ResultSet schemas = connection.getMetaData().getSchemas()) {
|
||||
while (schemas.next()) {
|
||||
ret.add(schemas.getString("TABLE_SCHEM"));
|
||||
@@ -70,7 +70,7 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
|
||||
@Override
|
||||
public List<TableDescription> queryTableList(Connection connection, String schemaName) {
|
||||
List<TableDescription> ret = new ArrayList<>();
|
||||
Set<String> uniqueSet = new HashSet<>();
|
||||
Set<String> uniqueSet = new LinkedHashSet<>();
|
||||
String[] types = new String[]{"TABLE", "VIEW"};
|
||||
try (ResultSet tables = connection.getMetaData()
|
||||
.getTables(this.catalogName, schemaName, "%", types)) {
|
||||
@@ -106,7 +106,7 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
|
||||
@Override
|
||||
public List<String> queryTableColumnName(Connection connection, String schemaName,
|
||||
String tableName) {
|
||||
Set<String> columns = new HashSet<>();
|
||||
Set<String> columns = new LinkedHashSet<>();
|
||||
try (ResultSet rs = connection.getMetaData()
|
||||
.getColumns(this.catalogName, schemaName, tableName, null)) {
|
||||
while (rs.next()) {
|
||||
@@ -145,14 +145,11 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
|
||||
@Override
|
||||
public List<String> queryTablePrimaryKeys(Connection connection, String schemaName,
|
||||
String tableName) {
|
||||
Set<String> ret = new HashSet<>();
|
||||
Set<String> ret = new LinkedHashSet<>();
|
||||
try (ResultSet primaryKeys = connection.getMetaData()
|
||||
.getPrimaryKeys(this.catalogName, schemaName, tableName)) {
|
||||
while (primaryKeys.next()) {
|
||||
String name = primaryKeys.getString("COLUMN_NAME");
|
||||
if (!ret.contains(name)) {
|
||||
ret.add(name);
|
||||
}
|
||||
ret.add(primaryKeys.getString("COLUMN_NAME"));
|
||||
}
|
||||
return new ArrayList<>(ret);
|
||||
} catch (SQLException e) {
|
||||
@@ -187,11 +184,11 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
|
||||
List<Object> row = new ArrayList<>(count);
|
||||
for (int i = 1; i <= count; i++) {
|
||||
Object value = rs.getObject(i);
|
||||
if (value != null && value instanceof byte[]) {
|
||||
if (value instanceof byte[]) {
|
||||
row.add(DbswitchStrUtils.toHexString((byte[]) value));
|
||||
} else if (value != null && value instanceof java.sql.Clob) {
|
||||
} else if (value instanceof java.sql.Clob) {
|
||||
row.add(TypeConvertUtils.castToString(value));
|
||||
} else if (value != null && value instanceof java.sql.Blob) {
|
||||
} else if (value instanceof java.sql.Blob) {
|
||||
byte[] bytes = TypeConvertUtils.castToByteArray(value);
|
||||
row.add(DbswitchStrUtils.toHexString(bytes));
|
||||
} else {
|
||||
@@ -211,7 +208,7 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
|
||||
@Override
|
||||
public void testQuerySQL(Connection connection, String sql) {
|
||||
String wrapperSql = this.getTestQuerySQL(sql);
|
||||
try (Statement statement = connection.createStatement();) {
|
||||
try (Statement statement = connection.createStatement()) {
|
||||
statement.execute(wrapperSql);
|
||||
} catch (SQLException e) {
|
||||
throw new RuntimeException(e);
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-data</artifactId>
|
||||
|
@@ -0,0 +1,36 @@
|
||||
package com.gitee.dbswitch.data.config;
|
||||
|
||||
import com.gitee.dbswitch.data.util.DataSourceUtils;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.core.task.AsyncTaskExecutor;
|
||||
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
|
||||
|
||||
@Configuration("dbswitchTaskExecutorConfig")
|
||||
public class TaskExecutorConfig {
|
||||
|
||||
public final static String TASK_EXECUTOR_BEAN_NAME = "tableMigrationExecutor";
|
||||
|
||||
/**
|
||||
* 创建一个异步任务执行ThreadPoolTaskExecutor
|
||||
*
|
||||
* @return ThreadPoolTaskExecutor
|
||||
*/
|
||||
@Bean(TASK_EXECUTOR_BEAN_NAME)
|
||||
public AsyncTaskExecutor createTableMigrationTaskExecutor() {
|
||||
ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
|
||||
taskExecutor.setCorePoolSize(DataSourceUtils.MAX_THREAD_COUNT);
|
||||
taskExecutor.setMaxPoolSize(DataSourceUtils.MAX_THREAD_COUNT);
|
||||
taskExecutor.setQueueCapacity(10000);
|
||||
taskExecutor.setKeepAliveSeconds(1800);
|
||||
taskExecutor.setDaemon(true);
|
||||
taskExecutor.setThreadGroupName("dbswitch");
|
||||
taskExecutor.setThreadNamePrefix("dbswitch-migration-");
|
||||
taskExecutor.setBeanName(TASK_EXECUTOR_BEAN_NAME);
|
||||
taskExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
|
||||
taskExecutor.initialize();
|
||||
return taskExecutor;
|
||||
}
|
||||
|
||||
}
|
@@ -20,7 +20,7 @@ public class TargetDataSourceProperties {
|
||||
private String username;
|
||||
private String password;
|
||||
private Long connectionTimeout = TimeUnit.SECONDS.toMillis(60);
|
||||
private Long maxLifeTime = TimeUnit.MINUTES.toMillis(60);
|
||||
private Long maxLifeTime = TimeUnit.MINUTES.toMillis(30);
|
||||
|
||||
private String targetSchema = "";
|
||||
private Boolean targetDrop = Boolean.TRUE;
|
||||
|
@@ -10,7 +10,6 @@
|
||||
package com.gitee.dbswitch.data.service;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.gitee.dbswitch.common.type.DBTableType;
|
||||
import com.gitee.dbswitch.common.util.DbswitchStrUtils;
|
||||
import com.gitee.dbswitch.core.model.TableDescription;
|
||||
import com.gitee.dbswitch.core.service.IMetaDataByDatasourceService;
|
||||
@@ -24,6 +23,7 @@ import com.gitee.dbswitch.data.util.DataSourceUtils;
|
||||
import com.zaxxer.hikari.HikariDataSource;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
@@ -31,9 +31,9 @@ import java.util.function.Function;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.regex.Pattern;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.springframework.core.task.AsyncTaskExecutor;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.StopWatch;
|
||||
import org.springframework.util.StringUtils;
|
||||
|
||||
/**
|
||||
* 数据迁移主逻辑类
|
||||
@@ -59,13 +59,19 @@ public class MigrationService {
|
||||
*/
|
||||
private final DbswichProperties properties;
|
||||
|
||||
/**
|
||||
* 任务执行线程池
|
||||
*/
|
||||
private final AsyncTaskExecutor taskExecutor;
|
||||
|
||||
/**
|
||||
* 构造函数
|
||||
*
|
||||
* @param properties 配置信息
|
||||
*/
|
||||
public MigrationService(DbswichProperties properties) {
|
||||
this.properties = properties;
|
||||
public MigrationService(DbswichProperties properties, AsyncTaskExecutor tableMigrationExecutor) {
|
||||
this.properties = Objects.requireNonNull(properties, "properties is null");
|
||||
this.taskExecutor = Objects.requireNonNull(tableMigrationExecutor, "taskExecutor is null");
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -78,21 +84,18 @@ public class MigrationService {
|
||||
log.info("dbswitch data service is started....");
|
||||
//log.info("Application properties configuration \n{}", properties);
|
||||
|
||||
try (HikariDataSource targetDataSource = DataSourceUtils
|
||||
.createTargetDataSource(properties.getTarget())) {
|
||||
try (HikariDataSource targetDataSource = DataSourceUtils.createTargetDataSource(properties.getTarget())) {
|
||||
int sourcePropertiesIndex = 0;
|
||||
int totalTableCount = 0;
|
||||
List<SourceDataSourceProperties> sourcesProperties = properties.getSource();
|
||||
for (SourceDataSourceProperties sourceProperties : sourcesProperties) {
|
||||
|
||||
try (HikariDataSource sourceDataSource = DataSourceUtils
|
||||
.createSourceDataSource(sourceProperties)) {
|
||||
try (HikariDataSource sourceDataSource = DataSourceUtils.createSourceDataSource(sourceProperties)) {
|
||||
IMetaDataByDatasourceService
|
||||
sourceMetaDataService = new MetaDataByDataSourceServiceImpl(sourceDataSource);
|
||||
|
||||
// 判断处理的策略:是排除还是包含
|
||||
List<String> includes = DbswitchStrUtils
|
||||
.stringToList(sourceProperties.getSourceIncludes());
|
||||
List<String> includes = DbswitchStrUtils.stringToList(sourceProperties.getSourceIncludes());
|
||||
log.info("Includes tables is :{}", jackson.writeValueAsString(includes));
|
||||
List<String> filters = DbswitchStrUtils
|
||||
.stringToList(sourceProperties.getSourceExcludes());
|
||||
@@ -138,8 +141,7 @@ public class MigrationService {
|
||||
numberOfFailures, totalBytesSize));
|
||||
}
|
||||
} else {
|
||||
if (includes.size() == 1 && (includes.get(0).contains("*") || includes.get(0)
|
||||
.contains("?"))) {
|
||||
if (includes.size() == 1 && (includes.get(0).contains("*") || includes.get(0).contains("?"))) {
|
||||
if (Pattern.matches(includes.get(0), tableName)) {
|
||||
futures.add(
|
||||
makeFutureTask(td, indexInternal, sourceDataSource, targetDataSource,
|
||||
@@ -158,20 +160,15 @@ public class MigrationService {
|
||||
|
||||
}
|
||||
|
||||
try {
|
||||
CompletableFuture.allOf(futures.toArray(new CompletableFuture[]{})).get();
|
||||
log.info(
|
||||
"#### Complete data migration for the [ {} ] data source:\ntotal count={}\nfailure count={}\ntotal bytes size={}",
|
||||
sourcePropertiesIndex, futures.size(), numberOfFailures.get(),
|
||||
BytesUnitUtils.bytesSizeToHuman(totalBytesSize.get()));
|
||||
perfStats.add(new PerfStat(sourcePropertiesIndex, futures.size(),
|
||||
numberOfFailures.get(), totalBytesSize.get()));
|
||||
++sourcePropertiesIndex;
|
||||
totalTableCount += futures.size();
|
||||
} catch (InterruptedException e) {
|
||||
log.warn(" ### Thread is interrupted , exit execute task now ......");
|
||||
throw e;
|
||||
}
|
||||
CompletableFuture.allOf(futures.toArray(new CompletableFuture[]{})).join();
|
||||
log.info(
|
||||
"#### Complete data migration for the [ {} ] data source:\ntotal count={}\nfailure count={}\ntotal bytes size={}",
|
||||
sourcePropertiesIndex, futures.size(), numberOfFailures.get(),
|
||||
BytesUnitUtils.bytesSizeToHuman(totalBytesSize.get()));
|
||||
perfStats.add(new PerfStat(sourcePropertiesIndex, futures.size(),
|
||||
numberOfFailures.get(), totalBytesSize.get()));
|
||||
++sourcePropertiesIndex;
|
||||
totalTableCount += futures.size();
|
||||
}
|
||||
}
|
||||
log.info("service run all success, total migrate table count={} ", totalTableCount);
|
||||
@@ -212,7 +209,8 @@ public class MigrationService {
|
||||
HikariDataSource tds,
|
||||
AtomicInteger numberOfFailures,
|
||||
AtomicLong totalBytesSize) {
|
||||
return CompletableFuture.supplyAsync(getMigrateHandler(td, indexInternal, sds, tds))
|
||||
return CompletableFuture
|
||||
.supplyAsync(getMigrateHandler(td, indexInternal, sds, tds), this.taskExecutor)
|
||||
.exceptionally(getExceptHandler(td, numberOfFailures))
|
||||
.thenAccept(totalBytesSize::addAndGet);
|
||||
}
|
||||
@@ -245,8 +243,8 @@ public class MigrationService {
|
||||
TableDescription td,
|
||||
AtomicInteger numberOfFailures) {
|
||||
return (e) -> {
|
||||
log.error("Error migration for table: {}.{}, error message:", td.getSchemaName(),
|
||||
td.getTableName(), e);
|
||||
log.error("Error migration for table: {}.{}, error message: {}",
|
||||
td.getSchemaName(), td.getTableName(), e.getMessage());
|
||||
numberOfFailures.incrementAndGet();
|
||||
throw new RuntimeException(e);
|
||||
};
|
||||
|
@@ -24,6 +24,9 @@ import org.springframework.jdbc.core.JdbcTemplate;
|
||||
@Slf4j
|
||||
public final class DataSourceUtils {
|
||||
|
||||
public static final int MAX_THREAD_COUNT = 10;
|
||||
public static final int MAX_TIMEOUT_MS = 60000;
|
||||
|
||||
/**
|
||||
* 创建于指定数据库连接描述符的连接池
|
||||
*
|
||||
@@ -46,11 +49,11 @@ public final class DataSourceUtils {
|
||||
} else {
|
||||
ds.setConnectionTestQuery("SELECT 1");
|
||||
}
|
||||
ds.setMaximumPoolSize(8);
|
||||
ds.setMinimumIdle(5);
|
||||
ds.setMaximumPoolSize(MAX_THREAD_COUNT);
|
||||
ds.setMinimumIdle(MAX_THREAD_COUNT);
|
||||
ds.setMaxLifetime(properties.getMaxLifeTime());
|
||||
ds.setConnectionTimeout(properties.getConnectionTimeout());
|
||||
ds.setIdleTimeout(60000);
|
||||
ds.setIdleTimeout(MAX_TIMEOUT_MS);
|
||||
|
||||
return ds;
|
||||
}
|
||||
@@ -79,11 +82,11 @@ public final class DataSourceUtils {
|
||||
} else {
|
||||
ds.setConnectionTestQuery("SELECT 1");
|
||||
}
|
||||
ds.setMaximumPoolSize(8);
|
||||
ds.setMinimumIdle(5);
|
||||
ds.setMaximumPoolSize(MAX_THREAD_COUNT);
|
||||
ds.setMinimumIdle(MAX_THREAD_COUNT);
|
||||
ds.setMaxLifetime(properties.getMaxLifeTime());
|
||||
ds.setConnectionTimeout(properties.getConnectionTimeout());
|
||||
ds.setIdleTimeout(60000);
|
||||
ds.setIdleTimeout(MAX_TIMEOUT_MS);
|
||||
|
||||
// 如果是Greenplum数据库,这里需要关闭会话的查询优化器
|
||||
if (properties.getDriverClassName().contains("postgresql")) {
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-dbchange</artifactId>
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-dbcommon</artifactId>
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-dbsynch</artifactId>
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-dbwriter</artifactId>
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-pgwriter</artifactId>
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>dbswitch-sql</artifactId>
|
||||
|
@@ -5,7 +5,7 @@
|
||||
<parent>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
</parent>
|
||||
|
||||
<artifactId>package-tool</artifactId>
|
||||
|
2
pom.xml
2
pom.xml
@@ -4,7 +4,7 @@
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<groupId>com.gitee.dbswitch</groupId>
|
||||
<artifactId>dbswitch-parent</artifactId>
|
||||
<version>1.6.15</version>
|
||||
<version>1.6.16</version>
|
||||
<packaging>pom</packaging>
|
||||
<name>dbswitch</name>
|
||||
<description>database switch project</description>
|
||||
|
@@ -1,6 +1,6 @@
|
||||
@echo off
|
||||
|
||||
set APP_VERSION=1.6.15
|
||||
set APP_VERSION=1.6.16
|
||||
|
||||
echo "Clean Project ..."
|
||||
call mvn clean -f pom.xml
|
||||
|
Reference in New Issue
Block a user