@@ -1,10 +1,8 @@
 package org.dbsyncer.parser;
 
 import org.dbsyncer.cache.CacheService;
-import org.dbsyncer.parser.event.FullRefreshEvent;
 import org.dbsyncer.common.event.RowChangedEvent;
 import org.dbsyncer.common.model.Result;
-import org.dbsyncer.parser.model.Task;
 import org.dbsyncer.common.util.CollectionUtils;
 import org.dbsyncer.common.util.JsonUtil;
 import org.dbsyncer.common.util.StringUtil;
@@ -18,10 +16,13 @@ import org.dbsyncer.connector.enums.OperationEnum;
 import org.dbsyncer.listener.enums.QuartzFilterEnum;
 import org.dbsyncer.parser.enums.ConvertEnum;
 import org.dbsyncer.parser.enums.ParserEnum;
+import org.dbsyncer.parser.event.FullRefreshEvent;
+import org.dbsyncer.parser.flush.BufferActuator;
+import org.dbsyncer.parser.flush.FlushStrategy;
+import org.dbsyncer.parser.flush.model.WriterRequest;
 import org.dbsyncer.parser.logger.LogService;
 import org.dbsyncer.parser.logger.LogType;
 import org.dbsyncer.parser.model.*;
-import org.dbsyncer.parser.strategy.FlushStrategy;
 import org.dbsyncer.parser.util.ConvertUtil;
 import org.dbsyncer.parser.util.PickerUtil;
 import org.dbsyncer.plugin.PluginFactory;
@@ -39,8 +40,10 @@ import org.springframework.stereotype.Component;
 import org.springframework.util.Assert;
 
 import java.time.Instant;
-import java.util.*;
-import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Executor;
 
@@ -76,6 +79,9 @@ public class ParserFactory implements Parser {
     @Autowired
     private ApplicationContext applicationContext;
 
+    @Autowired
+    private BufferActuator writerBufferActuator;
+
     @Override
     public ConnectorMapper connect(ConnectorConfig config) {
         return connectorFactory.connect(config);
@@ -265,8 +271,8 @@ public class ParserFactory implements Parser {
         params.putIfAbsent(ParserEnum.PAGE_INDEX.getCode(), ParserEnum.PAGE_INDEX.getDefaultValue());
         int pageSize = mapping.getReadNum();
         int batchSize = mapping.getBatchNum();
-        ConnectorMapper sConnectionMapper = connectorFactory.connect(sConfig);
-        ConnectorMapper tConnectionMapper = connectorFactory.connect(tConfig);
+        ConnectorMapper sConnectorMapper = connectorFactory.connect(sConfig);
+        ConnectorMapper tConnectorMapper = connectorFactory.connect(tConfig);
 
         for (; ; ) {
             if (!task.isRunning()) {
@@ -276,7 +282,7 @@ public class ParserFactory implements Parser {
 
             // 1. Fetch a page of data from the source
             int pageIndex = Integer.parseInt(params.get(ParserEnum.PAGE_INDEX.getCode()));
-            Result reader = connectorFactory.reader(sConnectionMapper, new ReaderConfig(command, new ArrayList<>(), pageIndex, pageSize));
+            Result reader = connectorFactory.reader(sConnectorMapper, new ReaderConfig(command, new ArrayList<>(), pageIndex, pageSize));
             List<Map> data = reader.getData();
             if (CollectionUtils.isEmpty(data)) {
                 params.clear();
@@ -294,7 +300,7 @@ public class ParserFactory implements Parser {
             pluginFactory.convert(group.getPlugin(), data, target);
 
             // 5. Write to the target
-            Result writer = writeBatch(tConnectionMapper, command, picker.getTargetFields(), target, batchSize);
+            Result writer = writeBatch(tConnectorMapper, command, ConnectorConstant.OPERTION_INSERT, picker.getTargetFields(), target, batchSize);
 
             // 6. Update the result
             flush(task, writer, target);
@@ -323,11 +329,67 @@ public class ParserFactory implements Parser {
         // 3. Plugin conversion
         pluginFactory.convert(tableGroup.getPlugin(), event, data, target);
 
-        // 4. Write to the target
-        Result writer = connectorFactory.writer(tConnectorMapper, new WriterSingleConfig(picker.getTargetFields(), tableGroup.getCommand(), event, target, rowChangedEvent.getTableName(), rowChangedEvent.isForceUpdate()));
+        // 4. Offer the change to the writer buffer actuator
+        writerBufferActuator.offer(new WriterRequest(metaId, tableGroup.getId(), event, tConnectorMapper, picker.getTargetFields(), tableGroup.getCommand(), target));
+    }
+
+    /**
+     * Batch write: splits the rows into chunks of batchSize and writes them concurrently
+     *
+     * @param connectorMapper target connector mapper
+     * @param command         write command
+     * @param fields          target fields
+     * @param dataList        rows to write
+     * @param batchSize       max rows per write
+     * @return merged write result
+     */
+    @Override
+    public Result writeBatch(ConnectorMapper connectorMapper, Map<String, String> command, String event, List<Field> fields, List<Map> dataList, int batchSize) {
+        // Total rows
+        int total = dataList.size();
+        // Fits in a single write
+        if (total <= batchSize) {
+            return connectorFactory.writer(connectorMapper, new WriterBatchConfig(event, command, fields, dataList));
+        }
+
+        // Split into batch tasks
+        int taskSize = total % batchSize == 0 ? total / batchSize : total / batchSize + 1;
 
-        // 5. Update the result
-        flushStrategy.flushIncrementData(metaId, writer, event, picker.getTargetMapList());
+        final Result result = new Result();
+        final CountDownLatch latch = new CountDownLatch(taskSize);
+        int fromIndex = 0;
+        int toIndex = batchSize;
+        for (int i = 0; i < taskSize; i++) {
+            final List<Map> data;
+            if (toIndex > total) {
+                toIndex = fromIndex + (total % batchSize); // last, partial batch
+                data = dataList.subList(fromIndex, toIndex);
+            } else {
+                data = dataList.subList(fromIndex, toIndex);
+                fromIndex += batchSize;
+                toIndex += batchSize;
+            }
+
+            taskExecutor.execute(() -> {
+                try {
+                    Result w = connectorFactory.writer(connectorMapper, new WriterBatchConfig(event, command, fields, data));
+                    // Merge the sub-task result
+                    result.getFailData().addAll(w.getFailData());
+                    result.getFail().getAndAdd(w.getFail().get());
+                    result.getError().append(w.getError());
+                } catch (Exception e) {
+                    result.getError().append(e.getMessage()).append(System.lineSeparator());
+                } finally {
+                    latch.countDown();
+                }
+            });
+        }
+        try {
+            latch.await();
+        } catch (InterruptedException e) {
+            logger.error(e.getMessage());
+        }
+        return result;
     }
 
     /**
@@ -384,66 +446,4 @@ public class ParserFactory implements Parser {
         return connector.getConfig();
     }
 
-    /**
-     * Batch write
-     *
-     * @param connectorMapper
-     * @param command
-     * @param fields
-     * @param target
-     * @param batchSize
-     * @return
-     */
-    private Result writeBatch(ConnectorMapper connectorMapper, Map<String, String> command, List<Field> fields, List<Map> target, int batchSize) {
-        // Total rows
-        int total = target.size();
-        // Fits in a single write
-        if (total <= batchSize) {
-            return connectorFactory.writer(connectorMapper, new WriterBatchConfig(command, fields, target));
-        }
-
-        // Split into batch tasks
-        int taskSize = total % batchSize == 0 ? total / batchSize : total / batchSize + 1;
-
-        // Convert to a message queue and write concurrently
-        Queue<Map> queue = new ConcurrentLinkedQueue<>(target);
-
-        final Result result = new Result();
-        final CountDownLatch latch = new CountDownLatch(taskSize);
-        for (int i = 0; i < taskSize; i++) {
-            taskExecutor.execute(() -> {
-                try {
-                    Result w = parallelTask(batchSize, queue, connectorMapper, command, fields);
-                    // CAS
-                    result.getFailData().addAll(w.getFailData());
-                    result.getFail().getAndAdd(w.getFail().get());
-                    result.getError().append(w.getError());
-                } catch (Exception e) {
-                    result.getError().append(e.getMessage()).append(System.lineSeparator());
-                } finally {
-                    latch.countDown();
-                }
-            });
-        }
-        try {
-            latch.await();
-        } catch (InterruptedException e) {
-            logger.error(e.getMessage());
-        }
-        return result;
-    }
-
-    private Result parallelTask(int batchSize, Queue<Map> queue, ConnectorMapper connectorMapper, Map<String, String> command,
-                                List<Field> fields) {
-        List<Map> data = new ArrayList<>();
-        for (int j = 0; j < batchSize; j++) {
-            Map poll = queue.poll();
-            if (null == poll) {
-                break;
-            }
-            data.add(poll);
-        }
-        return connectorFactory.writer(connectorMapper, new WriterBatchConfig(command, fields, data));
-    }
-
 }