!86 version for 1.6.6

* version for 1.6.6
inrgihc
2022-03-16 14:56:26 +00:00
parent 3268385b2a
commit 0312a4b1a4
134 changed files with 3680 additions and 1956 deletions

View File

@@ -5,7 +5,7 @@
<parent>
<groupId>com.gitee.dbswitch</groupId>
<artifactId>dbswitch-parent</artifactId>
<version>1.6.5</version>
<version>1.6.6</version>
</parent>
<artifactId>dbswitch-data</artifactId>

View File

@@ -9,6 +9,8 @@
/////////////////////////////////////////////////////////////
package com.gitee.dbswitch.data.config;
import com.gitee.dbswitch.data.entity.SourceDataSourceProperties;
import com.gitee.dbswitch.data.entity.TargetDataSourceProperties;
import java.util.ArrayList;
import java.util.List;
import lombok.Data;
@@ -33,34 +35,4 @@ public class DbswichProperties {
private List<SourceDataSourceProperties> source = new ArrayList<>();
private TargetDataSourceProperties target = new TargetDataSourceProperties();
@Data
public static class SourceDataSourceProperties {
private String url;
private String driverClassName;
private String username;
private String password;
private Integer fetchSize = 5000;
private String sourceSchema = "";
private String prefixTable = "";
private String sourceIncludes = "";
private String sourceExcludes = "";
}
@Data
public static class TargetDataSourceProperties {
private String url;
private String driverClassName;
private String username;
private String password;
private String targetSchema = "";
private Boolean targetDrop = Boolean.TRUE;
private Boolean createTableAutoIncrement = Boolean.FALSE;
private Boolean writerEngineInsert = Boolean.FALSE;
private Boolean changeDataSync = Boolean.FALSE;
}
}

View File

@@ -0,0 +1,33 @@
// Copyright tang. All rights reserved.
// https://gitee.com/inrgihc/dbswitch
//
// Use of this source code is governed by a BSD-style license
//
// Author: tang (inrgihc@126.com)
// Date : 2020/1/2
// Location: beijing , china
/////////////////////////////////////////////////////////////
package com.gitee.dbswitch.data.entity;
import com.gitee.dbswitch.common.entity.PatternMapper;
import java.util.List;
import java.util.concurrent.TimeUnit;
import lombok.Data;
@Data
public class SourceDataSourceProperties {
private String url;
private String driverClassName;
private String username;
private String password;
private Long connectionTimeout = TimeUnit.SECONDS.toMillis(60);
private Long maxLifeTime = TimeUnit.MINUTES.toMillis(60);
private Integer fetchSize = 5000;
private String sourceSchema = "";
private String sourceIncludes = "";
private String sourceExcludes = "";
private List<PatternMapper> regexTableMapper;
private List<PatternMapper> regexColumnMapper;
}
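
The two regex mapper lists are new in 1.6.6. PatterNameUtils is not shown in this diff, but judging from the regex-table-mapper example in the application.yml at the end (pattern '^' replaced by 'T_'), each mapper behaves like an ordered chain of regex replacements. A minimal, self-contained sketch of that assumed behavior, not taken from the library:

import java.util.LinkedHashMap;
import java.util.Map;

public class RegexMapperSketch {
  // Hypothetical stand-in for applying PatternMapper entries in order.
  static String finalName(String name, Map<String, String> mappers) {
    for (Map.Entry<String, String> m : mappers.entrySet()) {
      name = name.replaceAll(m.getKey(), m.getValue());
    }
    return name;
  }

  public static void main(String[] args) {
    Map<String, String> tableMapper = new LinkedHashMap<>();
    tableMapper.put("^", "T_");  // from-pattern -> to-value, as in the yml below
    System.out.println(finalName("USER_INFO", tableMapper));  // prints T_USER_INFO
  }
}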

View File

@@ -0,0 +1,30 @@
// Copyright tang. All rights reserved.
// https://gitee.com/inrgihc/dbswitch
//
// Use of this source code is governed by a BSD-style license
//
// Author: tang (inrgihc@126.com)
// Date : 2020/1/2
// Location: beijing , china
/////////////////////////////////////////////////////////////
package com.gitee.dbswitch.data.entity;
import java.util.concurrent.TimeUnit;
import lombok.Data;
@Data
public class TargetDataSourceProperties {
private String url;
private String driverClassName;
private String username;
private String password;
private Long connectionTimeout = TimeUnit.SECONDS.toMillis(60);
private Long maxLifeTime = TimeUnit.MINUTES.toMillis(60);
private String targetSchema = "";
private Boolean targetDrop = Boolean.TRUE;
private Boolean createTableAutoIncrement = Boolean.FALSE;
private Boolean writerEngineInsert = Boolean.FALSE;
private Boolean changeDataSync = Boolean.FALSE;
}

View File

@@ -10,14 +10,15 @@
package com.gitee.dbswitch.data.handler;
import com.gitee.dbswitch.common.type.DatabaseTypeEnum;
import com.gitee.dbswitch.common.util.CommonUtils;
import com.gitee.dbswitch.common.util.DatabaseAwareUtils;
import com.gitee.dbswitch.common.util.PatterNameUtils;
import com.gitee.dbswitch.core.model.ColumnDescription;
import com.gitee.dbswitch.core.model.TableDescription;
import com.gitee.dbswitch.core.service.IMetaDataService;
import com.gitee.dbswitch.core.service.impl.MigrationMetaDataServiceImpl;
import com.gitee.dbswitch.core.service.IMetaDataByDatasourceService;
import com.gitee.dbswitch.core.service.impl.MetaDataByDataSourceServiceImpl;
import com.gitee.dbswitch.data.config.DbswichProperties;
import com.gitee.dbswitch.data.entity.SourceDataSourceProperties;
import com.gitee.dbswitch.data.util.BytesUnitUtils;
import com.gitee.dbswitch.data.util.JdbcTemplateUtils;
import com.gitee.dbswitch.dbchange.ChangeCalculatorService;
import com.gitee.dbswitch.dbchange.IDatabaseChangeCaculator;
import com.gitee.dbswitch.dbchange.IDatabaseRowHandler;
@@ -26,7 +27,6 @@ import com.gitee.dbswitch.dbchange.TaskParamEntity;
import com.gitee.dbswitch.dbcommon.database.DatabaseOperatorFactory;
import com.gitee.dbswitch.dbcommon.database.IDatabaseOperator;
import com.gitee.dbswitch.dbcommon.domain.StatementResultSet;
import com.gitee.dbswitch.dbcommon.util.JdbcMetaDataUtils;
import com.gitee.dbswitch.dbsynch.DatabaseSynchronizeFactory;
import com.gitee.dbswitch.dbsynch.IDatabaseSynchronize;
import com.gitee.dbswitch.dbwriter.DatabaseWriterFactory;
@@ -34,11 +34,15 @@ import com.gitee.dbswitch.dbwriter.IDatabaseWriter;
import com.zaxxer.hikari.HikariDataSource;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.ehcache.sizeof.SizeOf;
import org.springframework.jdbc.core.JdbcTemplate;
@@ -54,12 +58,29 @@ public class MigrationHandler implements Supplier<Long> {
private final long MAX_CACHE_BYTES_SIZE = 64 * 1024 * 1024;
private int fetchSize = 100;
private final TableDescription tableDescription;
private final DbswichProperties properties;
private final DbswichProperties.SourceDataSourceProperties sourceProperties;
private final SourceDataSourceProperties sourceProperties;
// Source side
private final HikariDataSource sourceDataSource;
private final IMetaDataService sourceMetaDataService;
private DatabaseTypeEnum sourceProductType;
private String sourceSchemaName;
private String sourceTableName;
private List<ColumnDescription> sourceColumnDescriptions;
private List<String> sourcePrimaryKeys;
private IMetaDataByDatasourceService sourceMetaDataService;
// Target side
private final HikariDataSource targetDataSource;
private DatabaseTypeEnum targetProductType;
private String targetSchemaName;
private String targetTableName;
private List<ColumnDescription> targetColumnDescriptions;
private List<String> targetPrimaryKeys;
// Used for building log output strings
private String tableNameMapString;
public static MigrationHandler createInstance(TableDescription td,
DbswichProperties properties,
@@ -74,101 +95,126 @@ public class MigrationHandler implements Supplier<Long> {
Integer sourcePropertiesIndex,
HikariDataSource sds,
HikariDataSource tds) {
this.tableDescription = td;
this.sourceSchemaName = td.getSchemaName();
this.sourceTableName = td.getTableName();
this.properties = properties;
this.sourceProperties = properties.getSource().get(sourcePropertiesIndex);
this.sourceDataSource = sds;
this.sourceMetaDataService = new MigrationMetaDataServiceImpl();
this.targetDataSource = tds;
if (sourceProperties.getFetchSize() >= fetchSize) {
fetchSize = sourceProperties.getFetchSize();
}
this.sourceMetaDataService
.setDatabaseConnection(JdbcTemplateUtils.getDatabaseProduceName(sourceDataSource));
// Compute the new table name after mapping conversion
this.targetSchemaName = properties.getTarget().getTargetSchema();
this.targetTableName = PatterNameUtils.getFinalName(td.getTableName(),
sourceProperties.getRegexTableMapper());
this.tableNameMapString = String.format("%s.%s --> %s.%s",
td.getSchemaName(), td.getTableName(),
targetSchemaName, targetTableName);
}
@Override
public Long get() {
log.info("Begin Migrate table for {}.{} ", tableDescription.getSchemaName(),
tableDescription.getTableName());
log.info("Begin Migrate table for {}", tableNameMapString);
JdbcTemplate targetJdbcTemplate = new JdbcTemplate(targetDataSource);
DatabaseTypeEnum targetDatabaseType = JdbcTemplateUtils
.getDatabaseProduceName(targetDataSource);
IDatabaseWriter writer = DatabaseWriterFactory.createDatabaseWriter(targetDataSource,
properties.getTarget().getWriterEngineInsert());
this.sourceProductType = DatabaseAwareUtils.getDatabaseTypeByDataSource(sourceDataSource);
this.targetProductType = DatabaseAwareUtils.getDatabaseTypeByDataSource(targetDataSource);
this.sourceMetaDataService = new MetaDataByDataSourceServiceImpl(sourceDataSource,
sourceProductType);
// Read the column metadata of the source table
this.sourceColumnDescriptions = sourceMetaDataService
.queryTableColumnMeta(sourceSchemaName, sourceTableName);
this.sourcePrimaryKeys = sourceMetaDataService
.queryTablePrimaryKeys(sourceSchemaName, sourceTableName);
// Prepare the target table's column info according to the column-name mapping
this.targetColumnDescriptions = sourceColumnDescriptions.stream()
.map(column -> {
String newName = PatterNameUtils.getFinalName(
column.getFieldName(),
sourceProperties.getRegexColumnMapper());
ColumnDescription description = column.copy();
description.setFieldName(newName);
description.setLabelName(newName);
return description;
}).collect(Collectors.toList());
this.targetPrimaryKeys = sourcePrimaryKeys.stream()
.map(name ->
PatterNameUtils.getFinalName(name, sourceProperties.getRegexColumnMapper())
).collect(Collectors.toList());
// Log the table-name and column-name mapping relations
List<String> columnMapperPairs = new ArrayList<>();
Map<String, String> mapChecker = new HashMap<>();
for (int i = 0; i < sourceColumnDescriptions.size(); ++i) {
String sourceColumnName = sourceColumnDescriptions.get(i).getFieldName();
String targetColumnName = targetColumnDescriptions.get(i).getFieldName();
columnMapperPairs.add(String.format("%s --> %s", sourceColumnName, targetColumnName));
mapChecker.put(sourceColumnName, targetColumnName);
}
log.info("Mapping relation : \ntable mapper :\n\t{} \ncolumn mapper :\n\t{} ",
tableNameMapString, columnMapperPairs.stream().collect(Collectors.joining("\n\t")));
Set<String> valueSet = new HashSet<>(mapChecker.values());
if (mapChecker.keySet().size() != valueSet.size()) {
throw new RuntimeException("字段映射配置有误,多个字段映射到一个同名字段!");
}
IDatabaseWriter writer = DatabaseWriterFactory.createDatabaseWriter(
targetDataSource, properties.getTarget().getWriterEngineInsert());
if (properties.getTarget().getTargetDrop()) {
/*
When dbswitch.target.datasource-target-drop=true is configured,
<p>
first execute the drop table statement, then the create table statement
*/
// Drop the table first
try {
IDatabaseOperator targetOperator = DatabaseOperatorFactory
.createDatabaseOperator(targetDataSource);
targetOperator.dropTable(properties.getTarget().getTargetSchema(),
sourceProperties.getPrefixTable() + tableDescription.getTableName());
DatabaseOperatorFactory.createDatabaseOperator(targetDataSource)
.dropTable(targetSchemaName, targetTableName);
log.info("Target Table {}.{} is exits, drop it now !", targetSchemaName, targetTableName);
} catch (Exception e) {
log.info("Target Table {}.{} is not exits!", properties.getTarget().getTargetSchema(),
sourceProperties.getPrefixTable() + tableDescription.getTableName());
log.info("Target Table {}.{} is not exits, create it!", targetSchemaName, targetTableName);
}
// Then create the table
List<ColumnDescription> columnDescriptions = sourceMetaDataService
.queryTableColumnMeta(sourceProperties.getUrl(),
sourceProperties.getUsername(), sourceProperties.getPassword(),
tableDescription.getSchemaName(),
tableDescription.getTableName());
List<String> primaryKeys = sourceMetaDataService
.queryTablePrimaryKeys(sourceProperties.getUrl(),
sourceProperties.getUsername(), sourceProperties.getPassword(),
tableDescription.getSchemaName(),
tableDescription.getTableName());
String sqlCreateTable = sourceMetaDataService
.getDDLCreateTableSQL(targetDatabaseType, columnDescriptions, primaryKeys,
properties.getTarget().getTargetSchema(),
sourceProperties.getPrefixTable() + tableDescription.getTableName(),
properties.getTarget().getCreateTableAutoIncrement());
// Generate the create-table DDL and execute it
String sqlCreateTable = sourceMetaDataService.getDDLCreateTableSQL(
targetProductType, targetColumnDescriptions, targetPrimaryKeys,
targetSchemaName, targetTableName, properties.getTarget().getCreateTableAutoIncrement());
JdbcTemplate targetJdbcTemplate = new JdbcTemplate(targetDataSource);
targetJdbcTemplate.execute(sqlCreateTable);
log.info("Execute SQL: \n{}", sqlCreateTable);
return doFullCoverSynchronize(tableDescription, sourceProperties, sourceDataSource, writer);
return doFullCoverSynchronize(writer);
} else {
// Check whether incremental sync is possible: (1) both sides have the same table structure and identical primary-key columns; (2) for MySQL, the table uses the InnoDB engine
if (properties.getTarget().getChangeDataSync()) {
// Choose the sync mode by primary keys: incremental sync or full-cover sync
JdbcMetaDataUtils mds = new JdbcMetaDataUtils(sourceDataSource);
JdbcMetaDataUtils mdt = new JdbcMetaDataUtils(targetDataSource);
List<String> pks1 = mds.queryTablePrimaryKeys(tableDescription.getSchemaName(),
tableDescription.getTableName());
List<String> pks2 = mdt.queryTablePrimaryKeys(properties.getTarget().getTargetSchema(),
sourceProperties.getPrefixTable() + tableDescription.getTableName());
IMetaDataByDatasourceService metaDataByDatasourceService =
new MetaDataByDataSourceServiceImpl(targetDataSource, targetProductType);
List<String> dbTargetPks = metaDataByDatasourceService.queryTablePrimaryKeys(
targetSchemaName, targetTableName);
if (!pks1.isEmpty() && !pks2.isEmpty() && pks1.containsAll(pks2) && pks2
.containsAll(pks1)) {
if (targetDatabaseType == DatabaseTypeEnum.MYSQL
&& !JdbcTemplateUtils
.isMysqlInnodbStorageEngine(properties.getTarget().getTargetSchema(),
sourceProperties.getPrefixTable() + tableDescription.getTableName(),
targetDataSource)) {
return doFullCoverSynchronize(tableDescription, sourceProperties, sourceDataSource,
writer);
if (!targetPrimaryKeys.isEmpty() && !dbTargetPks.isEmpty()
&& targetPrimaryKeys.containsAll(dbTargetPks)
&& dbTargetPks.containsAll(targetPrimaryKeys)) {
if (targetProductType == DatabaseTypeEnum.MYSQL
&& !DatabaseAwareUtils.isMysqlInnodbStorageEngine(
targetSchemaName, targetTableName, targetDataSource)) {
return doFullCoverSynchronize(writer);
} else {
List<String> fields = mds.queryTableColumnName(tableDescription.getSchemaName(),
tableDescription.getTableName());
return doIncreaseSynchronize(tableDescription, sourceProperties, sourceDataSource,
writer, pks1, fields);
return doIncreaseSynchronize(writer);
}
} else {
return doFullCoverSynchronize(tableDescription, sourceProperties, sourceDataSource,
writer);
return doFullCoverSynchronize(writer);
}
} else {
return doFullCoverSynchronize(tableDescription, sourceProperties, sourceDataSource, writer);
return doFullCoverSynchronize(writer);
}
}
}
@@ -176,41 +222,32 @@ public class MigrationHandler implements Supplier<Long> {
/**
* Perform full-cover synchronization
*
* @param tableDescription the table description; may be a view or a physical table
* @param writer the writer for the target side
*/
private Long doFullCoverSynchronize(TableDescription tableDescription,
DbswichProperties.SourceDataSourceProperties sourceProperties,
HikariDataSource sourceDataSource,
IDatabaseWriter writer) {
private Long doFullCoverSynchronize(IDatabaseWriter writer) {
final int BATCH_SIZE = fetchSize;
// Prepare the data-write operation on the target side
writer.prepareWrite(properties.getTarget().getTargetSchema(),
sourceProperties.getPrefixTable() + tableDescription.getTableName());
writer.prepareWrite(targetSchemaName, targetTableName);
// Truncate the data in the target table
IDatabaseOperator targetOperator = DatabaseOperatorFactory
.createDatabaseOperator(writer.getDataSource());
targetOperator.truncateTableData(properties.getTarget().getTargetSchema(),
sourceProperties.getPrefixTable() + tableDescription.getTableName());
targetOperator.truncateTableData(targetSchemaName, targetTableName);
// Query data from the source and write it to the target
IDatabaseOperator sourceOperator = DatabaseOperatorFactory
.createDatabaseOperator(sourceDataSource);
sourceOperator.setFetchSize(BATCH_SIZE);
DatabaseTypeEnum sourceDatabaseType = JdbcTemplateUtils
.getDatabaseProduceName(sourceDataSource);
String fullTableName = CommonUtils.getTableFullNameByDatabase(sourceDatabaseType,
tableDescription.getSchemaName(), tableDescription.getTableName());
Map<String, Integer> columnMetaData = JdbcTemplateUtils.getColumnMetaData(
sourceDataSource, sourceDatabaseType, tableDescription.getSchemaName(),
tableDescription.getTableName());
List<String> fields = new ArrayList<>(columnMetaData.keySet());
StatementResultSet srs = sourceOperator
.queryTableData(tableDescription.getSchemaName(), tableDescription.getTableName(), fields);
List<String> sourceFields = sourceColumnDescriptions.stream()
.map(ColumnDescription::getFieldName)
.collect(Collectors.toList());
List<String> targetFields = targetColumnDescriptions.stream()
.map(ColumnDescription::getFieldName)
.collect(Collectors.toList());
StatementResultSet srs = sourceOperator.queryTableData(
sourceSchemaName, sourceTableName, sourceFields);
List<Object[]> cache = new LinkedList<>();
long cacheBytes = 0;
@@ -218,13 +255,13 @@ public class MigrationHandler implements Supplier<Long> {
long totalBytes = 0;
try (ResultSet rs = srs.getResultset()) {
while (rs.next()) {
Object[] record = new Object[fields.size()];
for (int i = 1; i <= fields.size(); ++i) {
Object[] record = new Object[sourceFields.size()];
for (int i = 1; i <= sourceFields.size(); ++i) {
try {
record[i - 1] = rs.getObject(i);
} catch (Exception e) {
log.warn("!!! Read data from table [ {} ] use function ResultSet.getObject() error",
fullTableName, e);
tableNameMapString, e);
record[i - 1] = null;
}
}
@@ -234,9 +271,9 @@ public class MigrationHandler implements Supplier<Long> {
++totalCount;
if (cache.size() >= BATCH_SIZE || cacheBytes >= MAX_CACHE_BYTES_SIZE) {
long ret = writer.write(fields, cache);
long ret = writer.write(targetFields, cache);
log.info("[FullCoverSync] handle table [{}] data count: {}, the batch bytes sie: {}",
fullTableName, ret, BytesUnitUtils.bytesSizeToHuman(cacheBytes));
tableNameMapString, ret, BytesUnitUtils.bytesSizeToHuman(cacheBytes));
cache.clear();
totalBytes += cacheBytes;
cacheBytes = 0;
@@ -244,15 +281,15 @@ public class MigrationHandler implements Supplier<Long> {
}
if (cache.size() > 0) {
long ret = writer.write(fields, cache);
long ret = writer.write(targetFields, cache);
log.info("[FullCoverSync] handle table [{}] data count: {}, last batch bytes sie: {}",
fullTableName, ret, BytesUnitUtils.bytesSizeToHuman(cacheBytes));
tableNameMapString, ret, BytesUnitUtils.bytesSizeToHuman(cacheBytes));
cache.clear();
totalBytes += cacheBytes;
}
log.info("[FullCoverSync] handle table [{}] total data count:{}, total bytes={}",
fullTableName, totalCount, BytesUnitUtils.bytesSizeToHuman(totalBytes));
tableNameMapString, totalCount, BytesUnitUtils.bytesSizeToHuman(totalBytes));
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
@@ -265,38 +302,40 @@ public class MigrationHandler implements Supplier<Long> {
/**
* Perform incremental (change-data) synchronization
*
* @param tableDescription the table description; must be a physical table here
* @param writer the writer for the target side
*/
private Long doIncreaseSynchronize(TableDescription tableDescription,
DbswichProperties.SourceDataSourceProperties sourceProperties,
HikariDataSource sourceDataSource,
IDatabaseWriter writer, List<String> pks, List<String> fields) {
private Long doIncreaseSynchronize(IDatabaseWriter writer) {
final int BATCH_SIZE = fetchSize;
List<String> sourceFields = sourceColumnDescriptions.stream()
.map(ColumnDescription::getFieldName)
.collect(Collectors.toList());
List<String> targetFields = targetColumnDescriptions.stream()
.map(ColumnDescription::getFieldName)
.collect(Collectors.toList());
DatabaseTypeEnum sourceDatabaseType = JdbcTemplateUtils
.getDatabaseProduceName(sourceDataSource);
String fullTableName = CommonUtils.getTableFullNameByDatabase(sourceDatabaseType,
tableDescription.getSchemaName(),
sourceProperties.getPrefixTable() + tableDescription.getTableName());
Map<String, String> columnNameMaps = new HashMap<>();
for (int i = 0; i < sourceFields.size(); ++i) {
columnNameMaps.put(sourceFields.get(i), targetFields.get(i));
}
TaskParamEntity.TaskParamEntityBuilder taskBuilder = TaskParamEntity.builder();
taskBuilder.oldDataSource(writer.getDataSource());
taskBuilder.oldSchemaName(properties.getTarget().getTargetSchema());
taskBuilder.oldTableName(sourceProperties.getPrefixTable() + tableDescription.getTableName());
taskBuilder.oldSchemaName(targetSchemaName);
taskBuilder.oldTableName(targetTableName);
taskBuilder.newDataSource(sourceDataSource);
taskBuilder.newSchemaName(tableDescription.getSchemaName());
taskBuilder.newTableName(tableDescription.getTableName());
taskBuilder.fieldColumns(fields);
taskBuilder.newSchemaName(sourceSchemaName);
taskBuilder.newTableName(sourceTableName);
taskBuilder.fieldColumns(sourceFields);
taskBuilder.columnsMap(columnNameMaps);
TaskParamEntity param = taskBuilder.build();
IDatabaseSynchronize synchronizer = DatabaseSynchronizeFactory
.createDatabaseWriter(writer.getDataSource());
synchronizer.prepare(param.getOldSchemaName(), param.getOldTableName(), fields, pks);
synchronizer.prepare(targetSchemaName, targetTableName, targetFields, targetPrimaryKeys);
IDatabaseChangeCaculator calculator = new ChangeCalculatorService();
calculator.setFetchSize(BATCH_SIZE);
calculator.setFetchSize(fetchSize);
calculator.setRecordIdentical(false);
calculator.setCheckJdbcType(false);
@@ -353,8 +392,8 @@ public class MigrationHandler implements Supplier<Long> {
doUpdate(fields);
}
log.info("[IncreaseSync] Handle table [{}] data one batch size: {}", fullTableName,
BytesUnitUtils.bytesSizeToHuman(cacheBytes));
log.info("[IncreaseSync] Handle table [{}] data one batch size: {}",
tableNameMapString, BytesUnitUtils.bytesSizeToHuman(cacheBytes));
cacheBytes = 0;
}
}
@@ -374,24 +413,24 @@ public class MigrationHandler implements Supplier<Long> {
}
log.info("[IncreaseSync] Handle table [{}] total count: {}, Insert:{},Update:{},Delete:{} ",
fullTableName, countTotal, countInsert, countUpdate, countDelete);
tableNameMapString, countTotal, countInsert, countUpdate, countDelete);
}
private void doInsert(List<String> fields) {
long ret = synchronizer.executeInsert(cacheInsert);
log.info("[IncreaseSync] Handle table [{}] data Insert count: {}", fullTableName, ret);
log.info("[IncreaseSync] Handle table [{}] data Insert count: {}", tableNameMapString, ret);
cacheInsert.clear();
}
private void doUpdate(List<String> fields) {
long ret = synchronizer.executeUpdate(cacheUpdate);
log.info("[IncreaseSync] Handle table [{}] data Update count: {}", fullTableName, ret);
log.info("[IncreaseSync] Handle table [{}] data Update count: {}", tableNameMapString, ret);
cacheUpdate.clear();
}
private void doDelete(List<String> fields) {
long ret = synchronizer.executeDelete(cacheDelete);
log.info("[IncreaseSync] Handle table [{}] data Delete count: {}", fullTableName, ret);
log.info("[IncreaseSync] Handle table [{}] data Delete count: {}", tableNameMapString, ret);
cacheDelete.clear();
long ret = synchronizer.executeDelete(cacheDelete);
log.info("[IncreaseSync] Handle table [{}] data Delete count: {}", tableNameMapString, ret);
cacheDelete.clear();

View File

@@ -11,16 +11,16 @@ package com.gitee.dbswitch.data.service;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.gitee.dbswitch.common.type.DBTableType;
import com.gitee.dbswitch.common.util.DbswitchStrUtils;
import com.gitee.dbswitch.core.model.TableDescription;
import com.gitee.dbswitch.core.service.IMetaDataService;
import com.gitee.dbswitch.core.service.impl.MigrationMetaDataServiceImpl;
import com.gitee.dbswitch.core.service.IMetaDataByDatasourceService;
import com.gitee.dbswitch.core.service.impl.MetaDataByDataSourceServiceImpl;
import com.gitee.dbswitch.data.config.DbswichProperties;
import com.gitee.dbswitch.data.domain.PerfStat;
import com.gitee.dbswitch.data.entity.SourceDataSourceProperties;
import com.gitee.dbswitch.data.handler.MigrationHandler;
import com.gitee.dbswitch.data.util.BytesUnitUtils;
import com.gitee.dbswitch.data.util.DataSourceUtils;
import com.gitee.dbswitch.data.util.JdbcTemplateUtils;
import com.gitee.dbswitch.data.util.StrUtils;
import com.zaxxer.hikari.HikariDataSource;
import java.util.ArrayList;
import java.util.List;
@@ -81,20 +81,20 @@ public class MigrationService {
.createTargetDataSource(properties.getTarget())) {
int sourcePropertiesIndex = 0;
int totalTableCount = 0;
List<DbswichProperties.SourceDataSourceProperties> sourcesProperties = properties.getSource();
for (DbswichProperties.SourceDataSourceProperties sourceProperties : sourcesProperties) {
List<SourceDataSourceProperties> sourcesProperties = properties.getSource();
for (SourceDataSourceProperties sourceProperties : sourcesProperties) {
try (HikariDataSource sourceDataSource = DataSourceUtils
.createSourceDataSource(sourceProperties)) {
IMetaDataService sourceMetaDataService = new MigrationMetaDataServiceImpl();
sourceMetaDataService
.setDatabaseConnection(JdbcTemplateUtils.getDatabaseProduceName(sourceDataSource));
IMetaDataByDatasourceService
sourceMetaDataService = new MetaDataByDataSourceServiceImpl(sourceDataSource);
// Decide the filtering strategy: include or exclude
List<String> includes = StrUtils.stringToList(sourceProperties.getSourceIncludes());
List<String> includes = DbswitchStrUtils
.stringToList(sourceProperties.getSourceIncludes());
log.info("Includes tables is :{}", jackson.writeValueAsString(includes));
List<String> filters = StrUtils.stringToList(sourceProperties.getSourceExcludes());
List<String> filters = DbswitchStrUtils
.stringToList(sourceProperties.getSourceExcludes());
log.info("Filter tables is :{}", jackson.writeValueAsString(filters));
boolean useExcludeTables = includes.isEmpty();
@@ -108,16 +108,14 @@ public class MigrationService {
List<CompletableFuture<Void>> futures = new ArrayList<>();
List<String> schemas = StrUtils.stringToList(sourceProperties.getSourceSchema());
List<String> schemas = DbswitchStrUtils.stringToList(sourceProperties.getSourceSchema());
log.info("Source schema names is :{}", jackson.writeValueAsString(schemas));
AtomicInteger numberOfFailures = new AtomicInteger(0);
AtomicLong totalBytesSize = new AtomicLong(0L);
final int indexInternal = sourcePropertiesIndex;
for (String schema : schemas) {
List<TableDescription> tableList = sourceMetaDataService
.queryTableList(sourceProperties.getUrl(),
sourceProperties.getUsername(), sourceProperties.getPassword(), schema);
List<TableDescription> tableList = sourceMetaDataService.queryTableList(schema);
if (tableList.isEmpty()) {
log.warn("### Find source database table list empty for schema name is : {}", schema);
} else {

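
The include/exclude decision above hinges on one line: useExcludeTables = includes.isEmpty(). A self-contained sketch of the assumed filtering semantics (an empty include list switches the service to exclude mode):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class TableFilterSketch {
  static boolean accept(String table, List<String> includes, List<String> excludes) {
    boolean useExcludeTables = includes.isEmpty();
    return useExcludeTables ? !excludes.contains(table) : includes.contains(table);
  }

  public static void main(String[] args) {
    List<String> includes = Collections.emptyList();
    List<String> excludes = Arrays.asList("TMP_LOG");
    System.out.println(accept("T_USER", includes, excludes));  // true
    System.out.println(accept("TMP_LOG", includes, excludes)); // false
  }
}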
View File

@@ -1,3 +1,12 @@
// Copyright tang. All rights reserved.
// https://gitee.com/inrgihc/dbswitch
//
// Use of this source code is governed by a BSD-style license
//
// Author: tang (inrgihc@126.com)
// Date : 2020/1/2
// Location: beijing , china
/////////////////////////////////////////////////////////////
package com.gitee.dbswitch.data.util;
import java.text.DecimalFormat;

View File

@@ -1,6 +1,16 @@
// Copyright tang. All rights reserved.
// https://gitee.com/inrgihc/dbswitch
//
// Use of this source code is governed by a BSD-style license
//
// Author: tang (inrgihc@126.com)
// Date : 2020/1/2
// Location: beijing , china
/////////////////////////////////////////////////////////////
package com.gitee.dbswitch.data.util;
import com.gitee.dbswitch.data.config.DbswichProperties;
import com.gitee.dbswitch.data.entity.SourceDataSourceProperties;
import com.gitee.dbswitch.data.entity.TargetDataSourceProperties;
import com.zaxxer.hikari.HikariDataSource;
import java.util.Objects;
import lombok.extern.slf4j.Slf4j;
@@ -17,29 +27,29 @@ public final class DataSourceUtils {
/**
* Create a connection pool from the given database connection descriptor
*
* @param description the database connection descriptor
* @param properties the database connection descriptor
* @return the HikariDataSource connection pool
*/
public static HikariDataSource createSourceDataSource(
DbswichProperties.SourceDataSourceProperties description) {
public static HikariDataSource createSourceDataSource(SourceDataSourceProperties properties) {
HikariDataSource ds = new HikariDataSource();
ds.setPoolName("The_Source_DB_Connection");
ds.setJdbcUrl(description.getUrl());
ds.setDriverClassName(description.getDriverClassName());
ds.setUsername(description.getUsername());
ds.setPassword(description.getPassword());
if (description.getDriverClassName().contains("oracle")) {
ds.setJdbcUrl(properties.getUrl());
ds.setDriverClassName(properties.getDriverClassName());
ds.setUsername(properties.getUsername());
ds.setPassword(properties.getPassword());
if (properties.getDriverClassName().contains("oracle")) {
ds.setConnectionTestQuery("SELECT 'Hello' from DUAL");
// https://blog.csdn.net/qq_20960159/article/details/78593936
System.getProperties().setProperty("oracle.jdbc.J2EE13Compliant", "true");
} else if (description.getDriverClassName().contains("db2")) {
} else if (properties.getDriverClassName().contains("db2")) {
ds.setConnectionTestQuery("SELECT 1 FROM SYSIBM.SYSDUMMY1");
} else {
ds.setConnectionTestQuery("SELECT 1");
}
ds.setMaximumPoolSize(8);
ds.setMinimumIdle(5);
ds.setConnectionTimeout(60000);
ds.setMaxLifetime(properties.getMaxLifeTime());
ds.setConnectionTimeout(properties.getConnectionTimeout());
ds.setIdleTimeout(60000);
return ds;
@@ -48,40 +58,40 @@ public final class DataSourceUtils {
/**
* Create a connection pool from the given database connection descriptor
*
* @param description the database connection descriptor
* @param properties the database connection descriptor
* @return the HikariDataSource connection pool
*/
public static HikariDataSource createTargetDataSource(
DbswichProperties.TargetDataSourceProperties description) {
if (description.getUrl().trim().startsWith("jdbc:hive2://")) {
public static HikariDataSource createTargetDataSource(TargetDataSourceProperties properties) {
if (properties.getUrl().trim().startsWith("jdbc:hive2://")) {
throw new UnsupportedOperationException("Unsupported hive as target datasource!!!");
}
HikariDataSource ds = new HikariDataSource();
ds.setPoolName("The_Target_DB_Connection");
ds.setJdbcUrl(description.getUrl());
ds.setDriverClassName(description.getDriverClassName());
ds.setUsername(description.getUsername());
ds.setPassword(description.getPassword());
if (description.getDriverClassName().contains("oracle")) {
ds.setJdbcUrl(properties.getUrl());
ds.setDriverClassName(properties.getDriverClassName());
ds.setUsername(properties.getUsername());
ds.setPassword(properties.getPassword());
if (properties.getDriverClassName().contains("oracle")) {
ds.setConnectionTestQuery("SELECT 'Hello' from DUAL");
} else if (description.getDriverClassName().contains("db2")) {
} else if (properties.getDriverClassName().contains("db2")) {
ds.setConnectionTestQuery("SELECT 1 FROM SYSIBM.SYSDUMMY1");
} else {
ds.setConnectionTestQuery("SELECT 1");
}
ds.setMaximumPoolSize(8);
ds.setMinimumIdle(5);
ds.setConnectionTimeout(30000);
ds.setMaxLifetime(properties.getMaxLifeTime());
ds.setConnectionTimeout(properties.getConnectionTimeout());
ds.setIdleTimeout(60000);
// For Greenplum, the session-level query optimizer needs to be turned off here
if (description.getDriverClassName().contains("postgresql")) {
if (properties.getDriverClassName().contains("postgresql")) {
org.springframework.jdbc.datasource.DriverManagerDataSource dataSource = new org.springframework.jdbc.datasource.DriverManagerDataSource();
dataSource.setDriverClassName(description.getDriverClassName());
dataSource.setUrl(description.getUrl());
dataSource.setUsername(description.getUsername());
dataSource.setPassword(description.getPassword());
dataSource.setDriverClassName(properties.getDriverClassName());
dataSource.setUrl(properties.getUrl());
dataSource.setUsername(properties.getUsername());
dataSource.setPassword(properties.getPassword());
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
String versionString = jdbcTemplate.queryForObject("SELECT version()", String.class);
if (Objects.nonNull(versionString) && versionString.contains("Greenplum")) {
@@ -96,4 +106,5 @@ public final class DataSourceUtils {
private DataSourceUtils() {
}
}
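
The visible change in both factory methods is that the pool timeouts are no longer hard-coded (60000/30000 ms) but come from the new entity defaults. A minimal HikariCP sketch of the resulting configuration; the JDBC URL is a placeholder, not from this commit:

import com.zaxxer.hikari.HikariDataSource;
import java.util.concurrent.TimeUnit;

public class PoolTimeoutSketch {
  public static void main(String[] args) {
    HikariDataSource ds = new HikariDataSource();
    ds.setJdbcUrl("jdbc:h2:mem:demo");   // placeholder URL for illustration
    ds.setMaximumPoolSize(8);
    ds.setMinimumIdle(5);
    ds.setMaxLifetime(TimeUnit.MINUTES.toMillis(60));        // maxLifeTime default
    ds.setConnectionTimeout(TimeUnit.SECONDS.toMillis(60));  // connectionTimeout default
    ds.setIdleTimeout(60000);
    ds.close();
  }
}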

View File

@@ -1,130 +0,0 @@
// Copyright tang. All rights reserved.
// https://gitee.com/inrgihc/dbswitch
//
// Use of this source code is governed by a BSD-style license
//
// Author: tang (inrgihc@126.com)
// Date : 2020/1/2
// Location: beijing , china
/////////////////////////////////////////////////////////////
package com.gitee.dbswitch.data.util;
import com.gitee.dbswitch.common.type.DatabaseTypeEnum;
import com.gitee.dbswitch.common.util.CommonUtils;
import com.gitee.dbswitch.dbcommon.util.DatabaseAwareUtils;
import com.gitee.dbswitch.common.util.HivePrepareUtils;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashMap;
import java.util.Map;
import javax.sql.DataSource;
import org.springframework.boot.jdbc.DatabaseDriver;
import org.springframework.jdbc.core.JdbcTemplate;
/**
* Utility class for JdbcTemplate-based access
*
* @author tang
*/
public final class JdbcTemplateUtils {
private JdbcTemplateUtils() {
}
/**
* Get the database type
*
* @param dataSource the data source
* @return DatabaseType the database type
*/
public static DatabaseTypeEnum getDatabaseProduceName(
DataSource dataSource) {
String productName = DatabaseAwareUtils.getDatabaseNameByDataSource(dataSource);
if (productName.equalsIgnoreCase("Greenplum")) {
return DatabaseTypeEnum.GREENPLUM;
} else if (productName.equalsIgnoreCase("SQLServer")) {
return DatabaseTypeEnum.SQLSERVER;
} else if (productName.equalsIgnoreCase("DM")) {
return DatabaseTypeEnum.DM;
} else if (productName.equalsIgnoreCase("Kingbase")) {
return DatabaseTypeEnum.KINGBASE;
} else if (productName.equalsIgnoreCase("Hive")) {
return DatabaseTypeEnum.HIVE;
} else {
DatabaseDriver databaseDriver = DatabaseDriver.fromProductName(productName);
if (DatabaseDriver.MARIADB == databaseDriver) {
return DatabaseTypeEnum.MARIADB;
} else if (DatabaseDriver.MYSQL == databaseDriver) {
return DatabaseTypeEnum.MYSQL;
} else if (DatabaseDriver.ORACLE == databaseDriver) {
return DatabaseTypeEnum.ORACLE;
} else if (DatabaseDriver.POSTGRESQL == databaseDriver) {
return DatabaseTypeEnum.POSTGRESQL;
} else if (DatabaseDriver.DB2 == databaseDriver) {
return DatabaseTypeEnum.DB2;
} else {
throw new RuntimeException(
String.format("Unsupported database type by product name [%s]", productName));
}
}
}
/**
* Get the column metadata of a table
*
* @param dataSource DataSource
* @param databaseType databaseType
* @param schemaName schemaName
* @param tableName tableName
* @return Map<String, Integer>
*/
public static Map<String, Integer> getColumnMetaData(
DataSource dataSource, DatabaseTypeEnum databaseType,
String schemaName, String tableName) {
String fullTableName = CommonUtils.getTableFullNameByDatabase(databaseType,
schemaName, tableName);
final String sql = String.format("select * from %s where 1=2", fullTableName);
Map<String, Integer> columnMetaData = new HashMap<>();
try (Connection connection = dataSource.getConnection()) {
try (Statement stmt = connection.createStatement()) {
if (connection.getMetaData().getDatabaseProductName().contains("Hive")) {
HivePrepareUtils.prepare(connection, schemaName, tableName);
}
try (ResultSet rs = stmt.executeQuery(sql)) {
ResultSetMetaData rsMetaData = rs.getMetaData();
for (int i = 0, len = rsMetaData.getColumnCount(); i < len; i++) {
columnMetaData.put(rsMetaData.getColumnName(i + 1), rsMetaData.getColumnType(i + 1));
}
return columnMetaData;
}
}
} catch (SQLException e) {
throw new RuntimeException(
String.format("获取表:%s 的字段的元信息时失败. 请联系 DBA 核查该库、表信息.", fullTableName), e);
}
}
/**
* Check whether a MySQL table uses the InnoDB storage engine
*
* @param schemaName the schema name
* @param tableName the table name
* @param dataSource the data source
* @return true if the storage engine is InnoDB, otherwise false
*/
public static boolean isMysqlInnodbStorageEngine(
String schemaName,
String tableName,
DataSource dataSource) {
String sql = "SELECT count(*) as total FROM information_schema.tables "
+ "WHERE table_schema=? AND table_name=? AND ENGINE='InnoDB'";
JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
return jdbcTemplate.queryForObject(sql, new Object[]{schemaName, tableName}, Integer.class)
> 0;
}
}
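
This whole utility class is deleted. Based on the imports and call sites in the MigrationHandler diff above, its duties moved to DatabaseAwareUtils in the common module. A sketch of the replacement calls; the signatures are inferred from this diff and not verified against the library:

import com.gitee.dbswitch.common.type.DatabaseTypeEnum;
import com.gitee.dbswitch.common.util.DatabaseAwareUtils;
import javax.sql.DataSource;

public class ReplacementCallsSketch {
  static void probe(DataSource dataSource) {
    // Formerly JdbcTemplateUtils.getDatabaseProduceName(dataSource)
    DatabaseTypeEnum type = DatabaseAwareUtils.getDatabaseTypeByDataSource(dataSource);
    // Formerly JdbcTemplateUtils.isMysqlInnodbStorageEngine(schema, table, dataSource)
    boolean innodb = DatabaseAwareUtils.isMysqlInnodbStorageEngine("demo", "t_user", dataSource);
    System.out.println(type + ", innodb=" + innodb);
  }
}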

View File

@@ -1,35 +0,0 @@
package com.gitee.dbswitch.data.util;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
/**
* String utility class
*
* @author tang
* @date 2021/6/8 20:55
*/
public final class StrUtils {
/**
* Split a comma-separated string into a list
*
* @param str the string to split
* @return List
*/
public static List<String> stringToList(String str) {
if (!StringUtils.isEmpty(str)) {
String[] strs = str.split(",");
if (strs.length > 0) {
return new ArrayList<>(Arrays.asList(strs));
}
}
return new ArrayList<>();
}
private StrUtils() {
}
}
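
StrUtils is removed in favor of DbswitchStrUtils in the common module (see the MigrationService diff above). For reference, the removed behavior in a runnable, dependency-free form, without the commons-lang3 null/empty check:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class StringToListSketch {
  static List<String> stringToList(String str) {
    if (str != null && !str.isEmpty()) {
      return new ArrayList<>(Arrays.asList(str.split(",")));
    }
    return new ArrayList<>();
  }

  public static void main(String[] args) {
    System.out.println(stringToList("TANG,DEMO"));  // [TANG, DEMO]
    System.out.println(stringToList(""));           // []
  }
}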

View File

@@ -10,14 +10,18 @@ dbswitch:
# source database configuration parameters
## fetch size for query source database
fetch-size: 10000
## schema name for query source database
## schema names of the source database, separated by ','
source-schema: 'TANG'
## prefix added to the table name on the target side
prefix-table: 'TA_'
## table name include from table lists
## table names to include from the table list, separated by ','
source-includes: ''
## table name exclude from table lists
## table names to exclude from the table list, separated by ','
source-excludes: ''
## table-name conversion mappers by regular expression
regex-table-mapper:
- 'from-pattern': '^'
'to-value': 'T_'
## column-name conversion mappers by regular expression, same format as regex-table-mapper
regex-column-mapper:
target:
# target database connection information