v1.6.15:配置任务执行的线程池

This commit is contained in:
inrgihc
2022-10-01 21:23:06 +08:00
parent e71cb3d3cc
commit 957a65a6a7
28 changed files with 179 additions and 84 deletions

View File

@@ -2,13 +2,22 @@
## 一、操作流程 ## 一、操作流程
![代码贡献操作流程](images/contribute.png) ![代码贡献操作流程](images/contribute.png)
## 二、代码格式
dbswitch使用java编码开发这里以IDEA为例指定使用google的代码风格可将项目下的intellij-java-google-style.xml文件导入到IDEA中。
![code style](images/code_style.png) ## 二、开发规范
### 1、编码规范
请遵循阿里巴巴编码规范(Java)IDEA推荐直接安装`Alibaba Java Coding Guidelines`
插件: https://plugins.jetbrains.com/plugin/10046-alibaba-java-coding-guidelines
### 2、代码格式
dbswitch使用java编码开发这里以IDEA为例指定使用google的代码风格可将项目下的intellij-java-google-style.xml文件导入到IDEA中也可到如下地址进行下载
https://github.com/google/styleguide/blob/gh-pages/intellij-java-google-style.xml
![code style](images/code_style.png)
## 三、操作步骤 ## 三、操作步骤
@@ -16,22 +25,31 @@ dbswitch使用java编码开发这里以IDEA为例指定使用google的代
首先使用gitee账号如果没有请先注册登陆后然后进入地址```https://gitee.com/inrgihc/dbswitch```中,点击```fork```按钮在自己账号下复制一份dbswitch项目如下图 首先使用gitee账号如果没有请先注册登陆后然后进入地址```https://gitee.com/inrgihc/dbswitch```中,点击```fork```按钮在自己账号下复制一份dbswitch项目如下图
![Fork dbswitch](images/git_fork.png) ![Fork dbswitch](images/git_fork.png)
### 2、Clone 自己的项目 ### 2、Clone 自己的项目
- (1) 拉取远端的代码到本地 - (1) 拉取远端的代码到本地
``` ```
git clone -b dev https://gitee.com/tangyibo/dbswitch.git git clone -b master https://gitee.com/tangyibo/dbswitch.git
cd dbswitch/ cd dbswitch/
git checkout -b dev
``` ```
**注请替换上述中的tangyibo为您gitee的实际账号信息**
- (2) 请正确配置自己的信息(此信息会包含在后面的commit提交信息中) - (2) 请正确配置自己的信息(此信息会包含在后面的commit提交信息中)
``` ```
git config --local user.name xxxx git config --local user.name xxxx
git config --local user.email xxxx@126.com git config --local user.email xxxx@126.com
``` ```
**注请替换上述中的xxxx为实际的信息** **注请替换上述中的xxxx为实际的信息**
- (3) 配置远端dbswitch项目的名称(这里假设为upstream) - (3) 配置远端dbswitch项目的名称(这里假设为upstream)
``` ```
git remote add upstream https://gitee.com/inrgihc/dbswitch.git git remote add upstream https://gitee.com/inrgihc/dbswitch.git
``` ```
@@ -41,16 +59,19 @@ git remote add upstream https://gitee.com/inrgihc/dbswitch.git
- (1) 在本地向自己的分支中提交代码: - (1) 在本地向自己的分支中提交代码:
- (2) fetch 远端dbswitch的最新代码 - (2) fetch 远端dbswitch的最新代码
``` ```
git fetch upstream master git fetch upstream master
``` ```
- (3) rebase 远端dbswitch的最新代码(处理可能存在的冲突) - (3) rebase 远端dbswitch的最新代码(处理可能存在的冲突)
``` ```
git rebase upstream/master git rebase upstream/master
``` ```
- (4) push 本地代码到远端 - (4) push 本地代码到远端
``` ```
git push git push
``` ```
@@ -63,10 +84,10 @@ git push
在自己的分支上创建pull requests: 在自己的分支上创建pull requests:
![PR dbswitch](images/git_pr.png) ![PR dbswitch](images/git_pr.png)
选择对应的分支: 选择对应的分支:
![PR dbswitch](images/git_select.png) ![PR dbswitch](images/git_select.png)
填写好“标题”和“描述”点击“创建Pull Request"即可完成代码提交合入申请操作审核通过后您成为dbswitch项目贡献者。 填写好“标题”和“描述”点击“创建Pull Request"即可完成代码提交合入申请操作审核通过后您成为dbswitch项目贡献者。

View File

@@ -2,7 +2,7 @@
set -e set -e
DBSWITCH_VERSION=1.6.15 DBSWITCH_VERSION=1.6.16
BUILD_DOCKER_DIR="$( cd "$( dirname "$0" )" && pwd )" BUILD_DOCKER_DIR="$( cd "$( dirname "$0" )" && pwd )"
PROJECT_ROOT_DIR=$( dirname "$BUILD_DOCKER_DIR") PROJECT_ROOT_DIR=$( dirname "$BUILD_DOCKER_DIR")
DOCKER_DBSWITCH_DIR=$BUILD_DOCKER_DIR/dbswitch DOCKER_DBSWITCH_DIR=$BUILD_DOCKER_DIR/dbswitch

View File

@@ -13,7 +13,7 @@ services:
MYSQL_ROOT_HOST: '%' MYSQL_ROOT_HOST: '%'
dbswitch: dbswitch:
container_name: dbswitch_webui container_name: dbswitch_webui
image: inrgihc/dbswitch:1.6.15 image: inrgihc/dbswitch:1.6.16
environment: environment:
MYSQLDB_HOST: dbswitch_mysqldb MYSQLDB_HOST: dbswitch_mysqldb
MYSQLDB_PORT: 3306 MYSQLDB_PORT: 3306

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-admin</artifactId> <artifactId>dbswitch-admin</artifactId>

View File

@@ -13,7 +13,7 @@ import org.springframework.context.annotation.Configuration;
import org.springframework.web.servlet.config.annotation.CorsRegistry; import org.springframework.web.servlet.config.annotation.CorsRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
@Configuration @Configuration("dbswitchCorsConfig")
public class CorsConfig implements WebMvcConfigurer { public class CorsConfig implements WebMvcConfigurer {
@Override @Override

View File

@@ -0,0 +1,35 @@
package com.gitee.dbswitch.admin.config;
import com.gitee.dbswitch.data.util.DataSourceUtils;
import java.util.concurrent.ThreadPoolExecutor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.AsyncTaskExecutor;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
@Configuration("dbswitchExecutorConfig")
public class ExecutorConfig {

  // Bean name other components use to inject this executor (e.g. the Quartz job service).
  public static final String TASK_EXECUTOR_BEAN_NAME = "migrationTaskExecutor";

  /**
   * Creates the asynchronous task executor ({@link ThreadPoolTaskExecutor}) used to run
   * table-migration work submitted by scheduled tasks.
   *
   * @return the initialized {@link AsyncTaskExecutor} bean named {@code migrationTaskExecutor}
   */
  @Bean(TASK_EXECUTOR_BEAN_NAME)
  public AsyncTaskExecutor createTableMigrationTaskExecutor() {
    ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
    // Fixed-size pool: core == max, both bounded by the shared migration thread limit.
    taskExecutor.setCorePoolSize(DataSourceUtils.MAX_THREAD_COUNT);
    taskExecutor.setMaxPoolSize(DataSourceUtils.MAX_THREAD_COUNT);
    taskExecutor.setQueueCapacity(10000);
    taskExecutor.setKeepAliveSeconds(1800);
    // Daemon threads so idle workers do not keep the JVM alive on shutdown.
    taskExecutor.setDaemon(true);
    taskExecutor.setThreadGroupName("dbswitch");
    taskExecutor.setThreadNamePrefix("dbswitch-migration-");
    taskExecutor.setBeanName(TASK_EXECUTOR_BEAN_NAME);
    // When the queue is full, run the task on the submitting thread instead of rejecting it.
    taskExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
    taskExecutor.initialize();
    return taskExecutor;
  }
}

View File

@@ -29,7 +29,7 @@ import org.springframework.web.servlet.HandlerInterceptor;
import org.springframework.web.servlet.config.annotation.InterceptorRegistry; import org.springframework.web.servlet.config.annotation.InterceptorRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
@Configuration @Configuration("dbswitchInterceptorConfig")
public class InterceptorConfig implements WebMvcConfigurer { public class InterceptorConfig implements WebMvcConfigurer {
@Override @Override

View File

@@ -25,7 +25,7 @@ import org.springframework.scheduling.quartz.SchedulerFactoryBean;
import org.springframework.scheduling.quartz.SpringBeanJobFactory; import org.springframework.scheduling.quartz.SpringBeanJobFactory;
@Slf4j @Slf4j
@Configuration @Configuration("dbswitchQuartzConfig")
public class QuartzConfig { public class QuartzConfig {
@Bean @Bean
@@ -63,7 +63,6 @@ public class QuartzConfig {
// Configure JobStore 作业存储配置 // Configure JobStore 作业存储配置
///////////////////////////////////// /////////////////////////////////////
prop.put("org.quartz.jobStore.misfireThreshold", "60000");
// 数据库方式 JobStore配置 // 数据库方式 JobStore配置
prop.put("org.quartz.jobStore.class", "org.quartz.impl.jdbcjobstore.JobStoreTX"); prop.put("org.quartz.jobStore.class", "org.quartz.impl.jdbcjobstore.JobStoreTX");
//持久化方式配置数据驱动 //持久化方式配置数据驱动

View File

@@ -26,7 +26,7 @@ import springfox.documentation.spi.DocumentationType;
import springfox.documentation.spring.web.plugins.Docket; import springfox.documentation.spring.web.plugins.Docket;
import springfox.documentation.swagger2.annotations.EnableSwagger2; import springfox.documentation.swagger2.annotations.EnableSwagger2;
@Configuration @Configuration("dbswitchSwaggerConfig")
@EnableSwagger2 @EnableSwagger2
public class SwaggerConfig { public class SwaggerConfig {

View File

@@ -14,7 +14,7 @@ import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry
import org.springframework.web.servlet.config.annotation.ViewControllerRegistry; import org.springframework.web.servlet.config.annotation.ViewControllerRegistry;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
@Configuration @Configuration("dbswitchWebMvcConfig")
public class WebMvcConfig implements WebMvcConfigurer { public class WebMvcConfig implements WebMvcConfigurer {
@Override @Override

View File

@@ -36,12 +36,15 @@ import org.quartz.JobExecutionException;
import org.quartz.JobKey; import org.quartz.JobKey;
import org.quartz.PersistJobDataAfterExecution; import org.quartz.PersistJobDataAfterExecution;
import org.quartz.UnableToInterruptJobException; import org.quartz.UnableToInterruptJobException;
import org.springframework.core.task.AsyncTaskExecutor;
import org.springframework.scheduling.quartz.QuartzJobBean; import org.springframework.scheduling.quartz.QuartzJobBean;
/** /**
* <p> * <p>
* 如果你使用了@PersistJobDataAfterExecution注解则强烈建议你同时使用@DisallowConcurrentExecution注 解,因为当同一个jobJobDetail的两个实例被并发执行时由于竞争JobDataMap中存储的数据很可能是不确定的。 * 如果你使用了@PersistJobDataAfterExecution注解则强烈建议你同时使用@DisallowConcurrentExecution注解
* </p> * <p>
* 因为当同一个jobJobDetail的两个实例被并发执行时由于竞争JobDataMap中存储的数据很可能是不确定的。
* <p>
*/ */
@Slf4j @Slf4j
@PersistJobDataAfterExecution @PersistJobDataAfterExecution
@@ -84,6 +87,9 @@ public class JobExecutorService extends QuartzJobBean implements InterruptableJo
@Resource @Resource
private AssignmentJobDAO assignmentJobDAO; private AssignmentJobDAO assignmentJobDAO;
@Resource
private AsyncTaskExecutor migrationTaskExecutor;
/** /**
* 实现setter方法Quartz会给成员变量taskId注入值 * 实现setter方法Quartz会给成员变量taskId注入值
* *
@@ -146,7 +152,7 @@ public class JobExecutorService extends QuartzJobBean implements InterruptableJo
properties.getTarget().setChangeDataSync(true); properties.getTarget().setChangeDataSync(true);
} }
MigrationService mainService = new MigrationService(properties); MigrationService mainService = new MigrationService(properties, migrationTaskExecutor);
if (interrupted) { if (interrupted) {
log.info("Quartz task id:{} interrupted when prepare stage", jobDataMap.getLong(TASK_ID)); log.info("Quartz task id:{} interrupted when prepare stage", jobDataMap.getLong(TASK_ID));
return; return;

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-common</artifactId> <artifactId>dbswitch-common</artifactId>

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-core</artifactId> <artifactId>dbswitch-core</artifactId>

View File

@@ -23,7 +23,7 @@ import java.sql.ResultSetMetaData;
import java.sql.SQLException; import java.sql.SQLException;
import java.sql.Statement; import java.sql.Statement;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashSet; import java.util.LinkedHashSet;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.StringUtils;
@@ -56,7 +56,7 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
@Override @Override
public List<String> querySchemaList(Connection connection) { public List<String> querySchemaList(Connection connection) {
Set<String> ret = new HashSet<>(); Set<String> ret = new LinkedHashSet<>();
try (ResultSet schemas = connection.getMetaData().getSchemas()) { try (ResultSet schemas = connection.getMetaData().getSchemas()) {
while (schemas.next()) { while (schemas.next()) {
ret.add(schemas.getString("TABLE_SCHEM")); ret.add(schemas.getString("TABLE_SCHEM"));
@@ -70,7 +70,7 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
@Override @Override
public List<TableDescription> queryTableList(Connection connection, String schemaName) { public List<TableDescription> queryTableList(Connection connection, String schemaName) {
List<TableDescription> ret = new ArrayList<>(); List<TableDescription> ret = new ArrayList<>();
Set<String> uniqueSet = new HashSet<>(); Set<String> uniqueSet = new LinkedHashSet<>();
String[] types = new String[]{"TABLE", "VIEW"}; String[] types = new String[]{"TABLE", "VIEW"};
try (ResultSet tables = connection.getMetaData() try (ResultSet tables = connection.getMetaData()
.getTables(this.catalogName, schemaName, "%", types)) { .getTables(this.catalogName, schemaName, "%", types)) {
@@ -106,7 +106,7 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
@Override @Override
public List<String> queryTableColumnName(Connection connection, String schemaName, public List<String> queryTableColumnName(Connection connection, String schemaName,
String tableName) { String tableName) {
Set<String> columns = new HashSet<>(); Set<String> columns = new LinkedHashSet<>();
try (ResultSet rs = connection.getMetaData() try (ResultSet rs = connection.getMetaData()
.getColumns(this.catalogName, schemaName, tableName, null)) { .getColumns(this.catalogName, schemaName, tableName, null)) {
while (rs.next()) { while (rs.next()) {
@@ -145,14 +145,11 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
@Override @Override
public List<String> queryTablePrimaryKeys(Connection connection, String schemaName, public List<String> queryTablePrimaryKeys(Connection connection, String schemaName,
String tableName) { String tableName) {
Set<String> ret = new HashSet<>(); Set<String> ret = new LinkedHashSet<>();
try (ResultSet primaryKeys = connection.getMetaData() try (ResultSet primaryKeys = connection.getMetaData()
.getPrimaryKeys(this.catalogName, schemaName, tableName)) { .getPrimaryKeys(this.catalogName, schemaName, tableName)) {
while (primaryKeys.next()) { while (primaryKeys.next()) {
String name = primaryKeys.getString("COLUMN_NAME"); ret.add(primaryKeys.getString("COLUMN_NAME"));
if (!ret.contains(name)) {
ret.add(name);
}
} }
return new ArrayList<>(ret); return new ArrayList<>(ret);
} catch (SQLException e) { } catch (SQLException e) {
@@ -187,11 +184,11 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
List<Object> row = new ArrayList<>(count); List<Object> row = new ArrayList<>(count);
for (int i = 1; i <= count; i++) { for (int i = 1; i <= count; i++) {
Object value = rs.getObject(i); Object value = rs.getObject(i);
if (value != null && value instanceof byte[]) { if (value instanceof byte[]) {
row.add(DbswitchStrUtils.toHexString((byte[]) value)); row.add(DbswitchStrUtils.toHexString((byte[]) value));
} else if (value != null && value instanceof java.sql.Clob) { } else if (value instanceof java.sql.Clob) {
row.add(TypeConvertUtils.castToString(value)); row.add(TypeConvertUtils.castToString(value));
} else if (value != null && value instanceof java.sql.Blob) { } else if (value instanceof java.sql.Blob) {
byte[] bytes = TypeConvertUtils.castToByteArray(value); byte[] bytes = TypeConvertUtils.castToByteArray(value);
row.add(DbswitchStrUtils.toHexString(bytes)); row.add(DbswitchStrUtils.toHexString(bytes));
} else { } else {
@@ -211,7 +208,7 @@ public abstract class AbstractDatabase implements IDatabaseInterface {
@Override @Override
public void testQuerySQL(Connection connection, String sql) { public void testQuerySQL(Connection connection, String sql) {
String wrapperSql = this.getTestQuerySQL(sql); String wrapperSql = this.getTestQuerySQL(sql);
try (Statement statement = connection.createStatement();) { try (Statement statement = connection.createStatement()) {
statement.execute(wrapperSql); statement.execute(wrapperSql);
} catch (SQLException e) { } catch (SQLException e) {
throw new RuntimeException(e); throw new RuntimeException(e);

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-data</artifactId> <artifactId>dbswitch-data</artifactId>

View File

@@ -0,0 +1,36 @@
package com.gitee.dbswitch.data.config;
import com.gitee.dbswitch.data.util.DataSourceUtils;
import java.util.concurrent.ThreadPoolExecutor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.AsyncTaskExecutor;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
@Configuration("dbswitchTaskExecutorConfig")
public class TaskExecutorConfig {

  // Bean name under which the executor is registered, injected by MigrationService.
  public static final String TASK_EXECUTOR_BEAN_NAME = "tableMigrationExecutor";

  /**
   * Creates the asynchronous task executor ({@link ThreadPoolTaskExecutor}) that runs the
   * per-table migration futures built by the data-migration service.
   *
   * @return the initialized {@link AsyncTaskExecutor} bean named {@code tableMigrationExecutor}
   */
  @Bean(TASK_EXECUTOR_BEAN_NAME)
  public AsyncTaskExecutor createTableMigrationTaskExecutor() {
    ThreadPoolTaskExecutor taskExecutor = new ThreadPoolTaskExecutor();
    // Fixed-size pool: core == max, both bounded by the shared migration thread limit.
    taskExecutor.setCorePoolSize(DataSourceUtils.MAX_THREAD_COUNT);
    taskExecutor.setMaxPoolSize(DataSourceUtils.MAX_THREAD_COUNT);
    taskExecutor.setQueueCapacity(10000);
    taskExecutor.setKeepAliveSeconds(1800);
    // Daemon threads so idle workers do not keep the JVM alive on shutdown.
    taskExecutor.setDaemon(true);
    taskExecutor.setThreadGroupName("dbswitch");
    taskExecutor.setThreadNamePrefix("dbswitch-migration-");
    taskExecutor.setBeanName(TASK_EXECUTOR_BEAN_NAME);
    // When the queue is full, run the task on the submitting thread instead of rejecting it.
    taskExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());
    taskExecutor.initialize();
    return taskExecutor;
  }
}

View File

@@ -20,7 +20,7 @@ public class TargetDataSourceProperties {
private String username; private String username;
private String password; private String password;
private Long connectionTimeout = TimeUnit.SECONDS.toMillis(60); private Long connectionTimeout = TimeUnit.SECONDS.toMillis(60);
private Long maxLifeTime = TimeUnit.MINUTES.toMillis(60); private Long maxLifeTime = TimeUnit.MINUTES.toMillis(30);
private String targetSchema = ""; private String targetSchema = "";
private Boolean targetDrop = Boolean.TRUE; private Boolean targetDrop = Boolean.TRUE;

View File

@@ -10,7 +10,6 @@
package com.gitee.dbswitch.data.service; package com.gitee.dbswitch.data.service;
import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectMapper;
import com.gitee.dbswitch.common.type.DBTableType;
import com.gitee.dbswitch.common.util.DbswitchStrUtils; import com.gitee.dbswitch.common.util.DbswitchStrUtils;
import com.gitee.dbswitch.core.model.TableDescription; import com.gitee.dbswitch.core.model.TableDescription;
import com.gitee.dbswitch.core.service.IMetaDataByDatasourceService; import com.gitee.dbswitch.core.service.IMetaDataByDatasourceService;
@@ -24,6 +23,7 @@ import com.gitee.dbswitch.data.util.DataSourceUtils;
import com.zaxxer.hikari.HikariDataSource; import com.zaxxer.hikari.HikariDataSource;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Objects;
import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicLong;
@@ -31,9 +31,9 @@ import java.util.function.Function;
import java.util.function.Supplier; import java.util.function.Supplier;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import lombok.extern.slf4j.Slf4j; import lombok.extern.slf4j.Slf4j;
import org.springframework.core.task.AsyncTaskExecutor;
import org.springframework.stereotype.Service; import org.springframework.stereotype.Service;
import org.springframework.util.StopWatch; import org.springframework.util.StopWatch;
import org.springframework.util.StringUtils;
/** /**
* 数据迁移主逻辑类 * 数据迁移主逻辑类
@@ -59,13 +59,19 @@ public class MigrationService {
*/ */
private final DbswichProperties properties; private final DbswichProperties properties;
/**
* 任务执行线程池
*/
private final AsyncTaskExecutor taskExecutor;
/** /**
* 构造函数 * 构造函数
* *
* @param properties 配置信息 * @param properties 配置信息
*/ */
public MigrationService(DbswichProperties properties) { public MigrationService(DbswichProperties properties, AsyncTaskExecutor tableMigrationExecutor) {
this.properties = properties; this.properties = Objects.requireNonNull(properties, "properties is null");
this.taskExecutor = Objects.requireNonNull(tableMigrationExecutor, "taskExecutor is null");
} }
/** /**
@@ -78,21 +84,18 @@ public class MigrationService {
log.info("dbswitch data service is started...."); log.info("dbswitch data service is started....");
//log.info("Application properties configuration \n{}", properties); //log.info("Application properties configuration \n{}", properties);
try (HikariDataSource targetDataSource = DataSourceUtils try (HikariDataSource targetDataSource = DataSourceUtils.createTargetDataSource(properties.getTarget())) {
.createTargetDataSource(properties.getTarget())) {
int sourcePropertiesIndex = 0; int sourcePropertiesIndex = 0;
int totalTableCount = 0; int totalTableCount = 0;
List<SourceDataSourceProperties> sourcesProperties = properties.getSource(); List<SourceDataSourceProperties> sourcesProperties = properties.getSource();
for (SourceDataSourceProperties sourceProperties : sourcesProperties) { for (SourceDataSourceProperties sourceProperties : sourcesProperties) {
try (HikariDataSource sourceDataSource = DataSourceUtils try (HikariDataSource sourceDataSource = DataSourceUtils.createSourceDataSource(sourceProperties)) {
.createSourceDataSource(sourceProperties)) {
IMetaDataByDatasourceService IMetaDataByDatasourceService
sourceMetaDataService = new MetaDataByDataSourceServiceImpl(sourceDataSource); sourceMetaDataService = new MetaDataByDataSourceServiceImpl(sourceDataSource);
// 判断处理的策略:是排除还是包含 // 判断处理的策略:是排除还是包含
List<String> includes = DbswitchStrUtils List<String> includes = DbswitchStrUtils.stringToList(sourceProperties.getSourceIncludes());
.stringToList(sourceProperties.getSourceIncludes());
log.info("Includes tables is :{}", jackson.writeValueAsString(includes)); log.info("Includes tables is :{}", jackson.writeValueAsString(includes));
List<String> filters = DbswitchStrUtils List<String> filters = DbswitchStrUtils
.stringToList(sourceProperties.getSourceExcludes()); .stringToList(sourceProperties.getSourceExcludes());
@@ -138,8 +141,7 @@ public class MigrationService {
numberOfFailures, totalBytesSize)); numberOfFailures, totalBytesSize));
} }
} else { } else {
if (includes.size() == 1 && (includes.get(0).contains("*") || includes.get(0) if (includes.size() == 1 && (includes.get(0).contains("*") || includes.get(0).contains("?"))) {
.contains("?"))) {
if (Pattern.matches(includes.get(0), tableName)) { if (Pattern.matches(includes.get(0), tableName)) {
futures.add( futures.add(
makeFutureTask(td, indexInternal, sourceDataSource, targetDataSource, makeFutureTask(td, indexInternal, sourceDataSource, targetDataSource,
@@ -158,20 +160,15 @@ public class MigrationService {
} }
try { CompletableFuture.allOf(futures.toArray(new CompletableFuture[]{})).join();
CompletableFuture.allOf(futures.toArray(new CompletableFuture[]{})).get(); log.info(
log.info( "#### Complete data migration for the [ {} ] data source:\ntotal count={}\nfailure count={}\ntotal bytes size={}",
"#### Complete data migration for the [ {} ] data source:\ntotal count={}\nfailure count={}\ntotal bytes size={}", sourcePropertiesIndex, futures.size(), numberOfFailures.get(),
sourcePropertiesIndex, futures.size(), numberOfFailures.get(), BytesUnitUtils.bytesSizeToHuman(totalBytesSize.get()));
BytesUnitUtils.bytesSizeToHuman(totalBytesSize.get())); perfStats.add(new PerfStat(sourcePropertiesIndex, futures.size(),
perfStats.add(new PerfStat(sourcePropertiesIndex, futures.size(), numberOfFailures.get(), totalBytesSize.get()));
numberOfFailures.get(), totalBytesSize.get())); ++sourcePropertiesIndex;
++sourcePropertiesIndex; totalTableCount += futures.size();
totalTableCount += futures.size();
} catch (InterruptedException e) {
log.warn(" ### Thread is interrupted , exit execute task now ......");
throw e;
}
} }
} }
log.info("service run all success, total migrate table count={} ", totalTableCount); log.info("service run all success, total migrate table count={} ", totalTableCount);
@@ -212,7 +209,8 @@ public class MigrationService {
HikariDataSource tds, HikariDataSource tds,
AtomicInteger numberOfFailures, AtomicInteger numberOfFailures,
AtomicLong totalBytesSize) { AtomicLong totalBytesSize) {
return CompletableFuture.supplyAsync(getMigrateHandler(td, indexInternal, sds, tds)) return CompletableFuture
.supplyAsync(getMigrateHandler(td, indexInternal, sds, tds), this.taskExecutor)
.exceptionally(getExceptHandler(td, numberOfFailures)) .exceptionally(getExceptHandler(td, numberOfFailures))
.thenAccept(totalBytesSize::addAndGet); .thenAccept(totalBytesSize::addAndGet);
} }
@@ -245,8 +243,8 @@ public class MigrationService {
TableDescription td, TableDescription td,
AtomicInteger numberOfFailures) { AtomicInteger numberOfFailures) {
return (e) -> { return (e) -> {
log.error("Error migration for table: {}.{}, error message:", td.getSchemaName(), log.error("Error migration for table: {}.{}, error message: {}",
td.getTableName(), e); td.getSchemaName(), td.getTableName(), e.getMessage());
numberOfFailures.incrementAndGet(); numberOfFailures.incrementAndGet();
throw new RuntimeException(e); throw new RuntimeException(e);
}; };

View File

@@ -24,6 +24,9 @@ import org.springframework.jdbc.core.JdbcTemplate;
@Slf4j @Slf4j
public final class DataSourceUtils { public final class DataSourceUtils {
public static final int MAX_THREAD_COUNT = 10;
public static final int MAX_TIMEOUT_MS = 60000;
/** /**
* 创建于指定数据库连接描述符的连接池 * 创建于指定数据库连接描述符的连接池
* *
@@ -46,11 +49,11 @@ public final class DataSourceUtils {
} else { } else {
ds.setConnectionTestQuery("SELECT 1"); ds.setConnectionTestQuery("SELECT 1");
} }
ds.setMaximumPoolSize(8); ds.setMaximumPoolSize(MAX_THREAD_COUNT);
ds.setMinimumIdle(5); ds.setMinimumIdle(MAX_THREAD_COUNT);
ds.setMaxLifetime(properties.getMaxLifeTime()); ds.setMaxLifetime(properties.getMaxLifeTime());
ds.setConnectionTimeout(properties.getConnectionTimeout()); ds.setConnectionTimeout(properties.getConnectionTimeout());
ds.setIdleTimeout(60000); ds.setIdleTimeout(MAX_TIMEOUT_MS);
return ds; return ds;
} }
@@ -79,11 +82,11 @@ public final class DataSourceUtils {
} else { } else {
ds.setConnectionTestQuery("SELECT 1"); ds.setConnectionTestQuery("SELECT 1");
} }
ds.setMaximumPoolSize(8); ds.setMaximumPoolSize(MAX_THREAD_COUNT);
ds.setMinimumIdle(5); ds.setMinimumIdle(MAX_THREAD_COUNT);
ds.setMaxLifetime(properties.getMaxLifeTime()); ds.setMaxLifetime(properties.getMaxLifeTime());
ds.setConnectionTimeout(properties.getConnectionTimeout()); ds.setConnectionTimeout(properties.getConnectionTimeout());
ds.setIdleTimeout(60000); ds.setIdleTimeout(MAX_TIMEOUT_MS);
// 如果是Greenplum数据库这里需要关闭会话的查询优化器 // 如果是Greenplum数据库这里需要关闭会话的查询优化器
if (properties.getDriverClassName().contains("postgresql")) { if (properties.getDriverClassName().contains("postgresql")) {

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-dbchange</artifactId> <artifactId>dbswitch-dbchange</artifactId>

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-dbcommon</artifactId> <artifactId>dbswitch-dbcommon</artifactId>

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-dbsynch</artifactId> <artifactId>dbswitch-dbsynch</artifactId>

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-dbwriter</artifactId> <artifactId>dbswitch-dbwriter</artifactId>

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-pgwriter</artifactId> <artifactId>dbswitch-pgwriter</artifactId>

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>dbswitch-sql</artifactId> <artifactId>dbswitch-sql</artifactId>

View File

@@ -5,7 +5,7 @@
<parent> <parent>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
</parent> </parent>
<artifactId>package-tool</artifactId> <artifactId>package-tool</artifactId>

View File

@@ -4,7 +4,7 @@
<modelVersion>4.0.0</modelVersion> <modelVersion>4.0.0</modelVersion>
<groupId>com.gitee.dbswitch</groupId> <groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId> <artifactId>dbswitch-parent</artifactId>
<version>1.6.15</version> <version>1.6.16</version>
<packaging>pom</packaging> <packaging>pom</packaging>
<name>dbswitch</name> <name>dbswitch</name>
<description>database switch project</description> <description>database switch project</description>

View File

@@ -1,6 +1,6 @@
@echo off @echo off
set APP_VERSION=1.6.15 set APP_VERSION=1.6.16
echo "Clean Project ..." echo "Clean Project ..."
call mvn clean -f pom.xml call mvn clean -f pom.xml