// 205 lines
// 7.9 KiB
// C#

using JiShe.CollectBus.Common;
using JiShe.CollectBus.Common.DeviceBalanceControl;
using JiShe.CollectBus.IoTDB.Context;
using JiShe.CollectBus.IoTDB.Interface;
using JiShe.CollectBus.IotSystems.MeterReadingRecords;
using JiShe.CollectBus.Kafka.Internal;
using JiShe.CollectBus.Kafka.Producer;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Threading;
using System.Threading.Channels;
using System.Threading.Tasks;
using Volo.Abp.DependencyInjection;
namespace JiShe.CollectBus.DataChannels
{
/// <summary>
/// 数据通道管理服务
/// </summary>
/// <summary>
/// Data channel management service: buffers scheduled meter-reading telemetry
/// packets through an in-process channel, drains them in batches, bulk-inserts
/// them into IoTDB grouped by topic, and can push them to Kafka with retries.
/// </summary>
public class DataChannelManageService : IDataChannelManageService, ITransientDependency
{
    private readonly ILogger<DataChannelManageService> _logger;
    private readonly IIoTDbProvider _dbProvider;
    private readonly IProducerService _producerService;
    private readonly KafkaOptionConfig _kafkaOptions;
    private readonly ServerApplicationOptions _applicationOptions;
    private readonly IoTDBRuntimeContext _runtimeContext;

    public DataChannelManageService(
        ILogger<DataChannelManageService> logger,
        IIoTDbProvider dbProvider,
        IoTDBRuntimeContext runtimeContext,
        IProducerService producerService,
        IOptions<KafkaOptionConfig> kafkaOptions,
        IOptions<ServerApplicationOptions> applicationOptions)
    {
        _logger = logger;
        _dbProvider = dbProvider;
        _runtimeContext = runtimeContext;
        _producerService = producerService;
        _kafkaOptions = kafkaOptions.Value;
        _applicationOptions = applicationOptions.Value;
        // All IoTDB access from this service goes through the table session pool.
        _runtimeContext.UseTableSessionPool = true;
    }

    /// <summary>
    /// Writes one scheduled-task batch (topic name + telemetry records) into the channel.
    /// Awaits if the channel is bounded and currently full.
    /// </summary>
    /// <param name="_telemetryPacketInfoWriter">Destination channel writer.</param>
    /// <param name="dataItems">Tuple of Kafka topic name and the records for it.</param>
    public async Task ScheduledMeterTaskWriterAsync(ChannelWriter<ValueTuple<string, List<MeterReadingTelemetryPacketInfo>>> _telemetryPacketInfoWriter, ValueTuple<string, List<MeterReadingTelemetryPacketInfo>> dataItems)
    {
        await _telemetryPacketInfoWriter.WriteAsync(dataItems);
    }

    /// <summary>
    /// Long-running consumer loop for the scheduled-task channel: drains up to
    /// 20000 items (or a 5-second batch window), merges records by topic, and
    /// bulk-inserts each topic group into IoTDB. Per-topic failures are logged
    /// and skipped; any other failure is logged as critical and rethrown.
    /// </summary>
    /// <param name="telemetryPacketInfoReader">
    /// Channel reader to drain. NOTE(review): relies on <see cref="ChannelReader{T}.Count"/>,
    /// which throws if the channel does not support counting — assumes an unbounded
    /// (countable) channel; confirm against the channel's creation site.
    /// </param>
    public async Task ScheduledMeterTaskReadingAsync(
        ChannelReader<ValueTuple<string, List<MeterReadingTelemetryPacketInfo>>> telemetryPacketInfoReader)
    {
        const int BatchSize = 20000;
        const int EmptyWaitMilliseconds = 1000;
        var timeout = TimeSpan.FromSeconds(5);
        var timer = Stopwatch.StartNew();
        long timeoutMilliseconds = 0;
        var metadata = await _dbProvider.GetMetadata<MeterReadingTelemetryPacketInfo>();
        try
        {
            while (true)
            {
                if (telemetryPacketInfoReader.Count <= 0)
                {
                    if (timeoutMilliseconds > 0)
                    {
                        // Timing summary is informational, not an error; use a
                        // structured template instead of string interpolation (CA2254).
                        _logger.LogInformation("{Method} 通道处理数据耗时{ElapsedMilliseconds}毫秒",
                            nameof(ScheduledMeterTaskReadingAsync), timeoutMilliseconds);
                    }
                    timeoutMilliseconds = 0;
                    // Nothing queued: short wait before polling again.
                    await Task.Delay(EmptyWaitMilliseconds);
                    continue;
                }
                timer.Restart();
                var batch = new List<ValueTuple<string, List<MeterReadingTelemetryPacketInfo>>>();
                // UtcNow: wall-clock jumps (DST/NTP) must not distort the batch window.
                var startTime = DateTime.UtcNow;
                // Drain up to BatchSize items or until the batch window elapses.
                while (batch.Count < BatchSize && (DateTime.UtcNow - startTime) < timeout)
                {
                    if (!telemetryPacketInfoReader.TryRead(out var dataItem))
                    {
                        // Channel drained — stop collecting instead of hot-spinning
                        // on TryRead for the remainder of the 5-second window.
                        break;
                    }
                    batch.Add(dataItem);
                }
                if (batch.Count == 0)
                {
                    await Task.Delay(EmptyWaitMilliseconds);
                    continue;
                }
                // Merge records by topic so each topic gets a single bulk insert.
                var topicGroups = new Dictionary<string, List<MeterReadingTelemetryPacketInfo>>();
                foreach (var (topicName, records) in batch)
                {
                    if (!topicGroups.TryGetValue(topicName, out var list))
                    {
                        list = new List<MeterReadingTelemetryPacketInfo>();
                        topicGroups[topicName] = list;
                    }
                    list.AddRange(records);
                }
                // Process each topic group; one topic failing must not block the others.
                foreach (var (topicName, records) in topicGroups)
                {
                    try
                    {
                        await _dbProvider.BatchInsertAsync(metadata, records);
                        // Throttled Kafka push is intentionally disabled for now;
                        // re-enable via DeviceGroupBalanceControl.ProcessWithThrottleAsync
                        // delegating to KafkaProducerIssuedMessageAction.
                    }
                    catch (Exception ex)
                    {
                        _logger.LogError(ex, "数据通道处理主题 {TopicName} 数据时发生异常", topicName);
                    }
                }
                timer.Stop();
                // Accumulate busy time; it is reported once the channel goes idle.
                timeoutMilliseconds += timer.ElapsedMilliseconds;
            }
        }
        catch (Exception ex)
        {
            _logger.LogCritical(ex, "定时任务处理发生致命错误");
            throw;
        }
    }

    /// <summary>
    /// Pushes a single message to Kafka with parameter validation and up to
    /// 3 attempts, backing off linearly (1s, 2s) between retries.
    /// </summary>
    /// <typeparam name="T">Reference type of the message payload.</typeparam>
    /// <param name="topicName">Target Kafka topic; must be non-blank.</param>
    /// <param name="taskRecord">Message payload; must be non-null.</param>
    /// <param name="partition">Target partition index.</param>
    /// <exception cref="ArgumentException">Topic name is blank or the record is null.</exception>
    protected async Task KafkaProducerIssuedMessageAction<T>(
        string topicName,
        T taskRecord,
        int partition) where T : class
    {
        if (string.IsNullOrWhiteSpace(topicName) || taskRecord == null)
        {
            // ArgumentException (not bare Exception) for invalid arguments;
            // still caught by existing catch (Exception) callers.
            throw new ArgumentException($"{nameof(KafkaProducerIssuedMessageAction)} 推送消息失败,参数异常,-101");
        }
        const int maxRetries = 3; // retry attempts
        for (int retry = 0; retry < maxRetries; retry++)
        {
            try
            {
                await _producerService.ProduceAsync(topicName, taskRecord, partition);
                return;
            }
            catch (Exception ex)
            {
                _logger.LogWarning(ex, "Kafka推送{topicName}主题分区{partition}重试中({Retry}/{MaxRetries})", topicName, partition, retry + 1, maxRetries);
                // Final attempt failed: surface the exception to the caller.
                if (retry == maxRetries - 1) throw;
                await Task.Delay(1000 * (retry + 1));
            }
        }
    }
}
}