@@ -7,27 +7,41 @@ import org.dbsyncer.common.model.AbstractConnectorConfig;
 import org.dbsyncer.common.model.IncrementConvertContext;
 import org.dbsyncer.common.model.Result;
 import org.dbsyncer.common.spi.ConnectorMapper;
+import org.dbsyncer.common.util.CollectionUtils;
 import org.dbsyncer.common.util.StringUtil;
 import org.dbsyncer.connector.ConnectorFactory;
-import org.dbsyncer.parser.ParserFactory;
+import org.dbsyncer.connector.config.DDLConfig;
+import org.dbsyncer.connector.constant.ConnectorConstant;
+import org.dbsyncer.connector.enums.ConnectorEnum;
+import org.dbsyncer.connector.model.MetaInfo;
+import org.dbsyncer.parser.Parser;
+import org.dbsyncer.parser.ddl.DDLParser;
 import org.dbsyncer.parser.flush.AbstractBufferActuator;
 import org.dbsyncer.parser.model.BatchWriter;
+import org.dbsyncer.parser.model.ConfigModel;
 import org.dbsyncer.parser.model.Connector;
+import org.dbsyncer.parser.model.FieldMapping;
 import org.dbsyncer.parser.model.Mapping;
 import org.dbsyncer.parser.model.Picker;
 import org.dbsyncer.parser.model.TableGroup;
 import org.dbsyncer.parser.model.WriterRequest;
 import org.dbsyncer.parser.model.WriterResponse;
 import org.dbsyncer.parser.strategy.FlushStrategy;
+import org.dbsyncer.parser.util.ConfigModelUtil;
 import org.dbsyncer.parser.util.ConvertUtil;
 import org.dbsyncer.parser.util.PickerUtil;
 import org.dbsyncer.plugin.PluginFactory;
+import org.dbsyncer.storage.StorageService;
+import org.dbsyncer.storage.enums.StorageEnum;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.springframework.context.ApplicationContext;
 import org.springframework.stereotype.Component;
 import org.springframework.util.Assert;

 import javax.annotation.PostConstruct;
 import javax.annotation.Resource;
+import java.util.Date;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Executor;
@@ -42,6 +56,8 @@ import java.util.concurrent.Executor;
 @Component
 public class GeneralBufferActuator extends AbstractBufferActuator<WriterRequest, WriterResponse> {

+    private final Logger logger = LoggerFactory.getLogger(getClass());
+
     @Resource
     private GeneralBufferConfig generalBufferConfig;

@@ -52,7 +68,7 @@ public class GeneralBufferActuator extends AbstractBufferActuator<WriterRequest, WriterResponse> {
     private ConnectorFactory connectorFactory;

     @Resource
-    private ParserFactory parserFactory;
+    private Parser parser;

     @Resource
     private PluginFactory pluginFactory;
@@ -63,9 +79,15 @@ public class GeneralBufferActuator extends AbstractBufferActuator<WriterRequest, WriterResponse> {
     @Resource
     private CacheService cacheService;

+    @Resource
+    private StorageService storageService;
+
     @Resource
     private ApplicationContext applicationContext;

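+    // translates captured DDL statements into target-side DDLConfig instructions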
+    @Resource
+    private DDLParser ddlParser;
+
     @PostConstruct
     public void init() {
         setConfig(generalBufferConfig);
@@ -79,11 +101,14 @@ public class GeneralBufferActuator extends AbstractBufferActuator<WriterRequest, WriterResponse> {

     @Override
     protected void partition(WriterRequest request, WriterResponse response) {
-        response.getDataList().add(request.getRow());
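+        // DDL events may carry no row data; merge only non-empty rows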
+        if (!CollectionUtils.isEmpty(request.getRow())) {
+            response.getDataList().add(request.getRow());
+        }
         response.getOffsetList().add(request.getChangedOffset());
         if (!response.isMerged()) {
             response.setTableGroupId(request.getTableGroupId());
             response.setEvent(request.getEvent());
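+            // keep the raw SQL so pull() can replay DDL statements on the target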
+            response.setSql(request.getSql());
             response.setMerged(true);
         }
     }
@@ -91,21 +116,28 @@ public class GeneralBufferActuator extends AbstractBufferActuator<WriterRequest, WriterResponse> {
     @Override
     protected boolean skipPartition(WriterRequest nextRequest, WriterResponse response) {
         // Under concurrency, the same row may trigger Insert > Delete > Insert back to back; once a batch would mix different events, stop merging the partition
-        return !StringUtil.equals(nextRequest.getEvent(), response.getEvent());
+        // Also isolate schema-change events to keep each DDL statement atomic
+        return !StringUtil.equals(nextRequest.getEvent(), response.getEvent()) || isDDLEvent(response.getEvent());
     }

     @Override
     protected void pull(WriterResponse response) {
-        // 1. Load the configuration
+        // 0. Load the configuration
         final TableGroup tableGroup = cacheService.get(response.getTableGroupId(), TableGroup.class);
         final Mapping mapping = cacheService.get(tableGroup.getMappingId(), Mapping.class);
         final TableGroup group = PickerUtil.mergeTableGroupConfig(mapping, tableGroup);
+
+        // 1. Hand DDL events to the DDL parser and return early
+        if (isDDLEvent(response.getEvent())) {
+            parseDDl(response, mapping, group);
+            return;
+        }
+
         final String sourceTableName = group.getSourceTable().getName();
         final String targetTableName = group.getTargetTable().getName();
         final String event = response.getEvent();
         final Picker picker = new Picker(group.getFieldMapping());
         final List<Map> sourceDataList = response.getDataList();
-
         // 2. Map source fields onto target fields
         List<Map> targetDataList = picker.pickData(sourceDataList);
@@ -120,7 +152,7 @@ public class GeneralBufferActuator extends AbstractBufferActuator<WriterRequest, WriterResponse> {

         // 5. Execute the batch synchronization
         BatchWriter batchWriter = new BatchWriter(tConnectorMapper, group.getCommand(), targetTableName, event, picker.getTargetFields(), targetDataList, generalBufferConfig.getBufferWriterCount());
-        Result result = parserFactory.writeBatch(context, batchWriter, generalExecutor);
+        Result result = parser.writeBatch(context, batchWriter, generalExecutor);

         // 6. Publish the increment-offset refresh event
         applicationContext.publishEvent(new RefreshOffsetEvent(applicationContext, response.getOffsetList()));
@@ -139,6 +171,67 @@ public class GeneralBufferActuator extends AbstractBufferActuator<WriterRequest, WriterResponse> {
         return generalExecutor;
     }

+    private boolean isDDLEvent(String event) {
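+        // only ALTER statements are recognized as DDL events for now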
+        return StringUtil.equals(event, ConnectorConstant.OPERTION_ALTER);
+    }
+
+    /**
+     * Parse a DDL event and replay it on the target table.
+     *
+     * @param response   merged response carrying the DDL statement
+     * @param mapping    driver configuration
+     * @param tableGroup merged table group configuration
+     */
+    private void parseDDl(WriterResponse response, Mapping mapping, TableGroup tableGroup) {
+        try {
+            AbstractConnectorConfig sConnConfig = getConnectorConfig(mapping.getSourceConnectorId());
+            AbstractConnectorConfig tConnConfig = getConnectorConfig(mapping.getTargetConnectorId());
+            String sConnType = sConnConfig.getConnectorType();
+            String tConnType = tConnConfig.getConnectorType();
+            // 0. Generate the SQL to run against the target table (MySQL only for now) FIXME AE86: MySQL-only trial run
+            if (StringUtil.equals(sConnType, tConnType) && StringUtil.equals(ConnectorEnum.MYSQL.getType(), tConnType)) {
+                String targetTableName = tableGroup.getTargetTable().getName();
+                List<FieldMapping> originalFieldMappings = tableGroup.getFieldMapping();
+                DDLConfig targetDDLConfig = ddlParser.parseDDlConfig(response.getSql(), tConnType, targetTableName, originalFieldMappings);
+                final ConnectorMapper tConnectorMapper = connectorFactory.connect(tConnConfig);
+                Result result = connectorFactory.writerDDL(tConnectorMapper, targetDDLConfig);
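+                // tag the result so the flushed increment data can be traced back to this table group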
+                result.setTableGroupId(tableGroup.getId());
+                result.setTargetTableGroupName(targetTableName);
+
+                // TODO life
+                // 1. Fetch the latest field metadata of the source and target tables
+                MetaInfo targetMetaInfo = parser.getMetaInfo(mapping.getTargetConnectorId(), targetTableName);
+                MetaInfo originMetaInfo = parser.getMetaInfo(mapping.getSourceConnectorId(), tableGroup.getSourceTable().getName());
+                // 1.1 See org.dbsyncer.biz.checker.impl.tablegroup.TableGroupChecker#refreshTableFields
+                // (the metadata above is already refreshed at this point)
+
+                // 1.2 Note: tables support custom primary keys, which needs compatible handling
+                // (not covered yet; dropping a primary key may need extra care, other cases should be unaffected)
+
+                // 2. Update the column lists on TableGroup
+                tableGroup.getSourceTable().setColumn(originMetaInfo.getColumn());
+                tableGroup.getTargetTable().setColumn(targetMetaInfo.getColumn());
+
+                // 3. Refresh the field mappings according to the changed columns
+                tableGroup.setFieldMapping(ddlParser.refreshFiledMappings(originalFieldMappings, originMetaInfo, targetMetaInfo, targetDDLConfig));
+                // 4. Rebuild TableGroup.command (a full driver-config merge is no longer needed; only the changed parts were replaced)
+                // 4.1 See org.dbsyncer.biz.checker.impl.tablegroup.TableGroupChecker#mergeConfig
+                Map<String, String> commands = parser.getCommand(mapping, tableGroup);
+                tableGroup.setCommand(commands);
+                // 5. Persist the config and refresh the cache
+                // 5.1 See org.dbsyncer.manager.ManagerFactory#editConfigModel
+                // (moving that method into the parser module would allow reuse)
+                flushCache(tableGroup);
+                applicationContext.publishEvent(new RefreshOffsetEvent(applicationContext, response.getOffsetList()));
+                flushStrategy.flushIncrementData(mapping.getMetaId(), result, response.getEvent());
+                return;
+            }
+        } catch (Exception e) {
+            logger.error(e.getMessage(), e);
+        }
+        logger.warn("DDL parsing currently supports MySQL only");
+    }
+
     /**
      * Get the connector configuration
      *
@@ -152,7 +245,30 @@ public class GeneralBufferActuator extends AbstractBufferActuator<WriterRequest, WriterResponse> {
         return conn.getConfig();
     }

+    /**
+     * Persist the table group configuration and refresh the cache
+     *
+     * @param tableGroup merged table group configuration
+     */
+    private void flushCache(TableGroup tableGroup) {
+        // 1. Build the config model
+        ConfigModel model = tableGroup;
+        Assert.notNull(model, "ConfigModel can not be null.");
+        model.setCreateTime(new Date().getTime());
+        model.setUpdateTime(new Date().getTime());
+
+        // 2. Persist the model
+        Map<String, Object> params = ConfigModelUtil.convertModelToMap(model);
+        logger.debug("params:{}", params);
+        storageService.edit(StorageEnum.CONFIG, params);
+
+        // 3. Refresh the cache
+        cacheService.put(model.getId(), model);
+    }
+
     public void setGeneralExecutor(Executor generalExecutor) {
         this.generalExecutor = generalExecutor;
     }
+
 }