using Confluent.Kafka;
using JiShe.CollectBus.Common;
using JiShe.CollectBus.Common.Consts;
using JiShe.CollectBus.Common.Helpers;
using JiShe.CollectBus.Kafka.Internal;
using JiShe.CollectBus.Kafka.Serialization;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using System.Collections.Concurrent;
using System.Text;
using System.Threading;

namespace JiShe.CollectBus.Kafka.Consumer
{
    public class ConsumerService : IConsumerService, IDisposable
    {
        private readonly ILogger<ConsumerService> _logger;

        /// <summary>
        /// Consumer store.
        /// Key format: {groupId}_{topic}_{TKey}_{TValue}
        /// </summary>
        private readonly ConcurrentDictionary<string, (object Consumer, CancellationTokenSource CTS)> _consumerStore = new();

        /// <summary>
        /// Delay applied when a partition has been drained or no message is available.
        /// </summary>
        private static TimeSpan DelayTime => TimeSpan.FromMilliseconds(100);

        private readonly KafkaOptionConfig _kafkaOptionConfig;
        private readonly ServerApplicationOptions _applicationOptions;
        private readonly KafkaPollyPipeline _kafkaPollyPipeline;

        /// <summary>
        /// ConsumerService
        /// </summary>
        /// <param name="logger"></param>
        /// <param name="kafkaOptionConfig"></param>
        /// <param name="kafkaPollyPipeline"></param>
        /// <param name="applicationOptions"></param>
        public ConsumerService(ILogger<ConsumerService> logger, IOptions<KafkaOptionConfig> kafkaOptionConfig, KafkaPollyPipeline kafkaPollyPipeline, IOptions<ServerApplicationOptions> applicationOptions)
        {
            _logger = logger;
            _kafkaOptionConfig = kafkaOptionConfig.Value;
            _applicationOptions = applicationOptions.Value;
            _kafkaPollyPipeline = kafkaPollyPipeline;
        }

        #region private

        /// <summary>
        /// Creates a consumer.
        /// </summary>
        /// <typeparam name="TKey"></typeparam>
        /// <typeparam name="TValue"></typeparam>
        /// <param name="groupId">Optional consumer group id; defaults to the server tag name.</param>
        /// <returns></returns>
        private IConsumer<TKey, TValue> CreateConsumer<TKey, TValue>(string? groupId = null) where TKey : notnull where TValue : class
        {
            var config = BuildConsumerConfig(groupId);
            return new ConsumerBuilder<TKey, TValue>(config)
                .SetValueDeserializer(new JsonSerializer<TValue>())
                .SetLogHandler((_, log) => _logger.LogInformation("Consumer log: {Message}", log.Message))
                .SetErrorHandler((_, e) => _logger.LogError("Consumer error: {Reason}", e.Reason))
                .Build();
        }

        private ConsumerConfig BuildConsumerConfig(string? groupId = null)
        {
            var config = new ConsumerConfig
            {
                BootstrapServers = _kafkaOptionConfig.BootstrapServers,
                GroupId = groupId ?? _applicationOptions.ServerTagName,
                AutoOffsetReset = AutoOffsetReset.Earliest,
                EnableAutoCommit = false, // disable auto-commit; offsets are committed manually
                EnablePartitionEof = true, // emit an EOF marker when the end of a partition is reached
                //AllowAutoCreateTopics = true, // allow automatic topic creation
                FetchMaxBytes = 1024 * 1024 * 50 // raise the maximum fetch size to 50 MB
            };

            if (_kafkaOptionConfig.EnableAuthorization)
            {
                config.SecurityProtocol = _kafkaOptionConfig.SecurityProtocol;
                config.SaslMechanism = _kafkaOptionConfig.SaslMechanism;
                config.SaslUsername = _kafkaOptionConfig.SaslUserName;
                config.SaslPassword = _kafkaOptionConfig.SaslPassword;
            }
            return config;
        }
        #endregion

        /// <summary>
        /// Subscribes to a topic.
        /// </summary>
        /// <typeparam name="TKey"></typeparam>
        /// <typeparam name="TValue"></typeparam>
        /// <param name="topic"></param>
        /// <param name="messageHandler"></param>
        /// <param name="groupId"></param>
        /// <returns></returns>
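        /// <example>
        /// A minimal usage sketch; the instance name, topic, group id, and message type are illustrative placeholders:
        /// <code>
        /// await consumerService.SubscribeAsync&lt;string, DeviceMessage&gt;(
        ///     "device-data",
        ///     async (key, value) =>
        ///     {
        ///         // handle the message; return true to commit the offset
        ///         return true;
        ///     },
        ///     groupId: "collect-bus-demo");
        /// </code>
        /// </example>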
        public async Task SubscribeAsync<TKey, TValue>(string topic, Func<TKey, TValue, Task<bool>> messageHandler, string? groupId = null) where TKey : notnull where TValue : class
        {
            await SubscribeAsync<TKey, TValue>(new[] { topic }, messageHandler, groupId);
        }
        /// <summary>
        /// Subscribes to a topic.
        /// </summary>
        /// <typeparam name="TValue"></typeparam>
        /// <param name="topic"></param>
        /// <param name="messageHandler"></param>
        /// <param name="groupId"></param>
        /// <returns></returns>
        public async Task SubscribeAsync<TValue>(string topic, Func<TValue, Task<bool>> messageHandler, string? groupId = null) where TValue : class
        {
            await SubscribeAsync<TValue>(new[] { topic }, messageHandler, groupId);
        }
        /// <summary>
        /// Subscribes to multiple topics.
        /// </summary>
        /// <typeparam name="TKey"></typeparam>
        /// <typeparam name="TValue"></typeparam>
        /// <param name="topics"></param>
        /// <param name="messageHandler"></param>
        /// <param name="groupId"></param>
        /// <returns></returns>
        public async Task SubscribeAsync<TKey, TValue>(string[] topics, Func<TKey, TValue, Task<bool>> messageHandler, string? groupId = null) where TKey : notnull where TValue : class
        {
            try
            {
                await _kafkaPollyPipeline.KafkaPipeline.ExecuteAsync(async token =>
                {
                    var consumerKey = $"{groupId}_{string.Join("_", topics)}_{typeof(TKey).Name}_{typeof(TValue).Name}";
                    var consumerStore = _consumerStore.GetOrAdd(consumerKey, _ =>
                    (
                        CreateConsumer<TKey, TValue>(groupId),
                        new CancellationTokenSource()
                    ));
                    if (consumerStore.Consumer == null)
                    {
                        _logger.LogWarning($"{string.Join(",", topics)} consumer creation failed or the consumer group has been disposed");
                        return;
                    }
                    var consumer = consumerStore.Consumer as IConsumer<TKey, TValue>;
                    var cts = consumerStore.CTS;
                    consumer!.Subscribe(topics);
                    _ = Task.Run(async () =>
                    {
                        while (!cts.IsCancellationRequested)
                        {
                            try
                            {
                                var result = consumer.Consume(cts.Token);
                                if (result == null)
                                {
                                    await Task.Delay(DelayTime, cts.Token);
                                    continue;
                                }
                                // Check the partition EOF marker before touching Message:
                                // EOF results carry a null Message.
                                if (result.IsPartitionEOF)
                                {
#if DEBUG
                                    _logger.LogInformation("Kafka consume: {Topic} partition {Partition} fully consumed", result.Topic, result.Partition);
#endif
                                    await Task.Delay(DelayTime, cts.Token);
                                    continue;
                                }
                                if (result.Message == null || result.Message.Value == null)
                                {
                                    await Task.Delay(DelayTime, cts.Token);
                                    continue;
                                }
                                if (_kafkaOptionConfig.EnableFilter)
                                {
                                    var headersFilter = new HeadersFilter { { "route-key", Encoding.UTF8.GetBytes(_applicationOptions.ServerTagName) } };
                                    // Skip messages whose headers do not match this server's route key
                                    if (!headersFilter.Match(result.Message.Headers))
                                    {
                                        consumer.Commit(result); // commit the offset of the skipped message
                                        continue;
                                    }
                                }
                                bool success = await messageHandler(result.Message.Key, result.Message.Value);
                                if (success)
                                    consumer.Commit(result); // manual commit
                            }
                            catch (ConsumeException ex) when (KafkaPollyPipeline.IsRecoverableError(ex))
                            {
                                _logger.LogError(ex, $"{string.Join(",", topics)} message consumption failed: {ex.Error.Reason}");
                                throw; // rethrow so the pipeline can retry
                            }
                            catch (KafkaException ex) when (KafkaPollyPipeline.IsRecoverableError(ex))
                            {
                                _logger.LogError(ex, $"{string.Join(",", topics)} message consumption failed: {ex.Error.Reason}");
                                throw; // rethrow so the pipeline can retry
                            }
                            catch (OperationCanceledException)
                            {
                                //ignore
                            }
                            catch (ObjectDisposedException)
                            {
                                _logger.LogError($"{string.Join(",", topics)} consumer has been disposed");
                                break;
                            }
                            catch (Exception ex)
                            {
                                _logger.LogError(ex, "Unexpected error while handling a message");
                            }
                        }
                    }, cts.Token);
                    await Task.CompletedTask;
                });
            }
            catch (Exception ex)
            {
                _logger.LogError(ex, $"Failed to subscribe to {string.Join(",", topics)}");
                throw;
            }
        }

        /// <summary>
        /// Subscribes to multiple topics.
        /// </summary>
        /// <typeparam name="TValue"></typeparam>
        /// <param name="topics"></param>
        /// <param name="messageHandler"></param>
        /// <param name="groupId"></param>
        /// <returns></returns>
        public async Task SubscribeAsync<TValue>(string[] topics, Func<TValue, Task<bool>> messageHandler, string? groupId) where TValue : class
        {
            try
            {
                await _kafkaPollyPipeline.KafkaPipeline.ExecuteAsync(async token =>
                {
                    var consumerKey = $"{groupId}_{string.Join("_", topics)}_{typeof(Ignore).Name}_{typeof(TValue).Name}";
                    var consumerStore = _consumerStore.GetOrAdd(consumerKey, _ =>
                    (
                        CreateConsumer<Ignore, TValue>(groupId),
                        new CancellationTokenSource()
                    ));
                    if (consumerStore.Consumer == null)
                    {
                        _logger.LogWarning($"{string.Join(",", topics)} consumer creation failed or the consumer group has been disposed");
                        return;
                    }
                    var consumer = consumerStore.Consumer as IConsumer<Ignore, TValue>;
                    var cts = consumerStore.CTS;
                    consumer!.Subscribe(topics);
                    _ = Task.Run(async () =>
                    {
                        while (!cts.IsCancellationRequested)
                        {
                            try
                            {
                                var result = consumer.Consume(cts.Token);
                                if (result == null)
                                {
                                    await Task.Delay(DelayTime, cts.Token);
                                    continue;
                                }
                                // Check the partition EOF marker before touching Message:
                                // EOF results carry a null Message.
                                if (result.IsPartitionEOF)
                                {
#if DEBUG
                                    _logger.LogInformation("Kafka consume: {Topic} partition {Partition} fully consumed", result.Topic, result.Partition);
#endif
                                    await Task.Delay(DelayTime, cts.Token);
                                    continue;
                                }
                                if (result.Message == null || result.Message.Value == null)
                                {
                                    await Task.Delay(DelayTime, cts.Token);
                                    continue;
                                }
                                if (_kafkaOptionConfig.EnableFilter)
                                {
                                    var headersFilter = new HeadersFilter { { "route-key", Encoding.UTF8.GetBytes(_applicationOptions.ServerTagName) } };
                                    // Skip messages whose headers do not match this server's route key
                                    if (!headersFilter.Match(result.Message.Headers))
                                    {
                                        consumer.Commit(result); // commit the offset of the skipped message
                                        continue;
                                    }
                                }
                                bool success = await messageHandler(result.Message.Value);
                                if (success)
                                    consumer.Commit(result); // manual commit
                            }
                            catch (ConsumeException ex) when (KafkaPollyPipeline.IsRecoverableError(ex))
                            {
                                _logger.LogError(ex, $"{string.Join(",", topics)} message consumption failed: {ex.Error.Reason}");
                                throw; // rethrow so the pipeline can retry
                            }
                            catch (OperationCanceledException)
                            {
                                //ignore
                            }
                            catch (ObjectDisposedException)
                            {
                                _logger.LogError($"{string.Join(",", topics)} consumer has been disposed");
                                break;
                            }
                            catch (Exception ex)
                            {
                                _logger.LogError(ex, "Unexpected error while handling a message");
                            }
                        }
                    }, cts.Token);
                    await Task.CompletedTask;
                });
            }
            catch (Exception ex)
            {
                _logger.LogError(ex, $"Failed to subscribe to {string.Join(",", topics)}");
                throw;
            }
        }

        /// <summary>
        /// Subscribes to a topic and handles messages in batches.
        /// </summary>
        /// <typeparam name="TKey">Message key type</typeparam>
        /// <typeparam name="TValue">Message value type</typeparam>
        /// <param name="topic">Topic</param>
        /// <param name="messageBatchHandler">Batch message handler</param>
        /// <param name="groupId">Consumer group id</param>
        /// <param name="batchSize">Batch size</param>
        /// <param name="batchTimeout">Batch timeout</param>
        public async Task SubscribeBatchAsync<TKey, TValue>(string topic, Func<List<TValue>, Task<bool>> messageBatchHandler, string? groupId = null, int batchSize = 100, TimeSpan? batchTimeout = null) where TKey : notnull where TValue : class
        {
            await SubscribeBatchAsync<TKey, TValue>(new[] { topic }, messageBatchHandler, groupId, batchSize, batchTimeout);
        }
        /// <summary>
        /// Subscribes to multiple topics and handles messages in batches.
        /// </summary>
        /// <typeparam name="TKey">Message key type</typeparam>
        /// <typeparam name="TValue">Message value type</typeparam>
        /// <param name="topics">Topic list</param>
        /// <param name="messageBatchHandler">Batch message handler</param>
        /// <param name="groupId">Consumer group id</param>
        /// <param name="batchSize">Batch size</param>
        /// <param name="batchTimeout">Batch timeout</param>
        public async Task SubscribeBatchAsync<TKey, TValue>(string[] topics, Func<List<TValue>, Task<bool>> messageBatchHandler, string? groupId = null, int batchSize = 100, TimeSpan? batchTimeout = null) where TKey : notnull where TValue : class
        {
            try
            {
                await _kafkaPollyPipeline.KafkaPipeline.ExecuteAsync(async token =>
                {
                    var consumerKey = $"{groupId}_{string.Join("_", topics)}_{typeof(TKey).Name}_{typeof(TValue).Name}";
                    var consumerStore = _consumerStore.GetOrAdd(consumerKey, _ =>
                    (
                        CreateConsumer<TKey, TValue>(groupId),
                        new CancellationTokenSource()
                    ));
                    if (consumerStore.Consumer == null)
                    {
                        _logger.LogWarning($"{string.Join(",", topics)} consumer creation failed or the consumer group has been disposed");
                        return;
                    }
                    var consumer = consumerStore.Consumer as IConsumer<TKey, TValue>;
                    var cts = consumerStore.CTS;
                    consumer!.Subscribe(topics);
                    var timeout = batchTimeout ?? TimeSpan.FromSeconds(5); // default batch timeout: 5 seconds
                    _ = Task.Run(async () =>
                    {
                        var messages = new List<(TValue Value, TopicPartitionOffset Offset)>();
                        var startTime = DateTime.UtcNow;
                        while (!cts.IsCancellationRequested)
                        {
                            try
                            {
                                // Accumulate messages quickly without blocking
                                while (messages.Count < batchSize && (DateTime.UtcNow - startTime) < timeout)
                                {
                                    var result = consumer.Consume(TimeSpan.Zero); // non-blocking poll
                                    if (result != null)
                                    {
                                        if (result.IsPartitionEOF)
                                        {
#if DEBUG
                                            _logger.LogInformation("Kafka consume: {Topic} partition {Partition} fully consumed", result.Topic, result.Partition);
#endif
                                            await Task.Delay(DelayTime, cts.Token);
                                        }
                                        else if (result.Message.Value != null)
                                        {
                                            if (_kafkaOptionConfig.EnableFilter)
                                            {
                                                var headersFilter = new HeadersFilter { { "route-key", Encoding.UTF8.GetBytes(_applicationOptions.ServerTagName) } };
                                                // Skip messages whose headers do not match this server's route key
                                                if (!headersFilter.Match(result.Message.Headers))
                                                {
                                                    consumer.Commit(result); // commit the offset of the skipped message
                                                    continue;
                                                }
                                            }
                                            messages.Add((result.Message.Value, result.TopicPartitionOffset));
                                        }
                                    }
                                    else
                                    {
                                        // No message available; wait briefly
                                        await Task.Delay(DelayTime, cts.Token);
                                    }
                                }

                                // Process the batch
                                if (messages.Count > 0)
                                {
                                    bool success = await messageBatchHandler(messages.Select(m => m.Value).ToList());
                                    if (success)
                                    {
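                                        // Track the highest processed offset per partition so that a
                                        // single commit of (max offset + 1) per partition covers the batch.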
                                        var offsetsByPartition = new Dictionary<TopicPartition, long>();
                                        foreach (var msg in messages)
                                        {
                                            var tp = msg.Offset.TopicPartition;
                                            var offset = msg.Offset.Offset;
                                            if (!offsetsByPartition.TryGetValue(tp, out var currentMax) || offset > currentMax)
                                            {
                                                offsetsByPartition[tp] = offset;
                                            }
                                        }

                                        var offsetsToCommit = offsetsByPartition
                                            .Select(kv => new TopicPartitionOffset(kv.Key, new Offset(kv.Value + 1)))
                                            .ToList();
                                        consumer.Commit(offsetsToCommit);
                                    }
                                    messages.Clear();
                                }

                                startTime = DateTime.UtcNow;
                            }
                            catch (ConsumeException ex) when (KafkaPollyPipeline.IsRecoverableError(ex))
                            {
                                _logger.LogError(ex, $"{string.Join(",", topics)} message consumption failed: {ex.Error.Reason}");
                                throw; // rethrow so the pipeline can retry
                            }
                            catch (KafkaException ex) when (KafkaPollyPipeline.IsRecoverableError(ex))
                            {
                                _logger.LogError(ex, $"{string.Join(",", topics)} message consumption failed: {ex.Error.Reason}");
                                throw; // rethrow so the pipeline can retry
                            }
                            catch (OperationCanceledException)
                            {
                                //ignore
                            }
                            catch (ObjectDisposedException)
                            {
                                _logger.LogError($"{string.Join(",", topics)} consumer has been disposed");
                                break;
                            }
                            catch (Exception ex)
                            {
                                _logger.LogError(ex, "Unexpected error while handling a message batch");
                            }
                        }
                    }, cts.Token);
                    await Task.CompletedTask;
                });
            }
            catch (Exception ex)
            {
                _logger.LogError(ex, $"Failed to subscribe to {string.Join(",", topics)}");
                throw;
            }
        }
        /// <summary>
        /// Subscribes to a topic and handles messages in batches.
        /// </summary>
        /// <typeparam name="TValue">Message value type</typeparam>
        /// <param name="topic">Topic</param>
        /// <param name="messageBatchHandler">Batch message handler</param>
        /// <param name="groupId">Consumer group id</param>
        /// <param name="batchSize">Batch size</param>
        /// <param name="batchTimeout">Batch timeout</param>
        /// <param name="consumeTimeout">Per-poll wait time</param>
        public async Task SubscribeBatchAsync<TValue>(string topic, Func<List<TValue>, Task<bool>> messageBatchHandler, string? groupId = null, int batchSize = 100, TimeSpan? batchTimeout = null, TimeSpan? consumeTimeout = null) where TValue : class
        {
            await SubscribeBatchAsync(new[] { topic }, messageBatchHandler, groupId, batchSize, batchTimeout, consumeTimeout);
        }
        /// <summary>
        /// Subscribes to multiple topics and handles messages in batches.
        /// </summary>
        /// <typeparam name="TValue">Message value type</typeparam>
        /// <param name="topics">Topic list</param>
        /// <param name="messageBatchHandler">Batch message handler</param>
        /// <param name="groupId">Consumer group id</param>
        /// <param name="batchSize">Batch size</param>
        /// <param name="batchTimeout">Batch timeout</param>
        /// <param name="consumeTimeout">Per-poll wait time</param>
        public async Task SubscribeBatchAsync<TValue>(string[] topics, Func<List<TValue>, Task<bool>> messageBatchHandler, string? groupId = null, int batchSize = 100, TimeSpan? batchTimeout = null, TimeSpan? consumeTimeout = null) where TValue : class
        {
            try
            {
                await _kafkaPollyPipeline.KafkaPipeline.ExecuteAsync(async token =>
                {
                    var consumerKey = $"{groupId}_{string.Join("_", topics)}_{typeof(Ignore).Name}_{typeof(TValue).Name}";
                    var consumerStore = _consumerStore.GetOrAdd(consumerKey, _ =>
                    (
                        CreateConsumer<Ignore, TValue>(groupId),
                        new CancellationTokenSource()
                    ));
                    if (consumerStore.Consumer == null)
                    {
                        _logger.LogWarning($"{string.Join(",", topics)} consumer creation failed or the consumer group has been disposed");
                        return;
                    }
                    var consumer = consumerStore.Consumer as IConsumer<Ignore, TValue>;
                    var cts = consumerStore.CTS;
                    consumer!.Subscribe(topics);
                    var timeout = batchTimeout ?? TimeSpan.FromSeconds(5); // default batch timeout: 5 seconds
                    var pollTimeout = consumeTimeout ?? TimeSpan.Zero; // non-blocking poll by default
                    _ = Task.Run(async () =>
                    {
                        var messages = new List<(TValue Value, TopicPartitionOffset Offset)>();
                        var startTime = DateTime.UtcNow;
                        while (!cts.IsCancellationRequested)
                        {
                            try
                            {
                                // Accumulate messages quickly without blocking
                                while (messages.Count < batchSize && (DateTime.UtcNow - startTime) < timeout)
                                {
                                    var result = consumer.Consume(pollTimeout);
                                    if (result != null)
                                    {
                                        if (result.IsPartitionEOF)
                                        {
#if DEBUG
                                            _logger.LogInformation("Kafka consume: {Topic} partition {Partition} fully consumed", result.Topic, result.Partition);
#endif
                                            await Task.Delay(DelayTime, cts.Token);
                                        }
                                        else if (result.Message.Value != null)
                                        {
                                            if (_kafkaOptionConfig.EnableFilter)
                                            {
                                                var headersFilter = new HeadersFilter { { "route-key", Encoding.UTF8.GetBytes(_applicationOptions.ServerTagName) } };
                                                // Skip messages whose headers do not match this server's route key
                                                if (!headersFilter.Match(result.Message.Headers))
                                                {
                                                    consumer.Commit(result); // commit the offset of the skipped message
                                                    continue;
                                                }
                                            }
                                            messages.Add((result.Message.Value, result.TopicPartitionOffset));
                                        }
                                    }
                                    else
                                    {
                                        // No message available; wait briefly
                                        await Task.Delay(DelayTime, cts.Token);
                                    }
                                }

                                // Process the batch
                                if (messages.Count > 0)
                                {
                                    bool success = await messageBatchHandler(messages.Select(m => m.Value).ToList());
                                    if (success)
                                    {
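                                        // Track the highest processed offset per partition so that a
                                        // single commit of (max offset + 1) per partition covers the batch.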
                                        var offsetsByPartition = new Dictionary<TopicPartition, long>();
                                        foreach (var msg in messages)
                                        {
                                            var tp = msg.Offset.TopicPartition;
                                            var offset = msg.Offset.Offset;
                                            if (!offsetsByPartition.TryGetValue(tp, out var currentMax) || offset > currentMax)
                                            {
                                                offsetsByPartition[tp] = offset;
                                            }
                                        }

                                        var offsetsToCommit = offsetsByPartition
                                            .Select(kv => new TopicPartitionOffset(kv.Key, new Offset(kv.Value + 1)))
                                            .ToList();
                                        consumer.Commit(offsetsToCommit);
                                    }
                                    messages.Clear();
                                }

                                startTime = DateTime.UtcNow;
                            }
                            catch (ConsumeException ex) when (KafkaPollyPipeline.IsRecoverableError(ex))
                            {
                                _logger.LogError(ex, $"{string.Join(",", topics)} message consumption failed: {ex.Error.Reason}");
                                throw; // rethrow so the pipeline can retry
                            }
                            catch (KafkaException ex) when (KafkaPollyPipeline.IsRecoverableError(ex))
                            {
                                _logger.LogError(ex, $"{string.Join(",", topics)} message consumption failed: {ex.Error.Reason}");
                                throw; // rethrow so the pipeline can retry
                            }
                            catch (OperationCanceledException)
                            {
                                //ignore
                            }
                            catch (ObjectDisposedException)
                            {
                                _logger.LogError($"{string.Join(",", topics)} consumer has been disposed");
                                break;
                            }
                            catch (Exception ex)
                            {
                                _logger.LogError(ex, "Unexpected error while handling a message batch");
                            }
                        }
                    }, cts.Token);

                    await Task.CompletedTask;
                });
            }
            catch (Exception ex)
            {
                _logger.LogError(ex, $"Failed to subscribe to {string.Join(",", topics)}");
                throw;
            }
        }

        /// <summary>
        /// Cancels a subscription and disposes the underlying consumer.
        /// </summary>
        /// <typeparam name="TKey"></typeparam>
        /// <typeparam name="TValue"></typeparam>
        /// <param name="topics"></param>
        /// <param name="groupId"></param>
        public void Unsubscribe<TKey, TValue>(string[] topics, string? groupId) where TKey : notnull where TValue : class
        {
            try
            {
                var consumerKey = $"{groupId}_{string.Join("_", topics)}_{typeof(TKey).Name}_{typeof(TValue).Name}";
                if (_consumerStore.TryGetValue(consumerKey, out var entry))
                {
                    entry.CTS.Cancel();
                    (entry.Consumer as IDisposable)?.Dispose();
                    entry.CTS.Dispose();
                    // remove the entry from the store
                    _consumerStore.TryRemove(consumerKey, out _);
                }
            }
            catch (Exception ex)
            {
                _logger.LogError(ex, $"Failed to unsubscribe from {string.Join(",", topics)}");
                throw;
            }
        }

        /// <summary>
        /// Releases all consumers.
        /// </summary>
        public void Dispose()
        {
            foreach (var entry in _consumerStore.Values)
            {
                entry.CTS.Cancel();
                (entry.Consumer as IDisposable)?.Dispose();
                entry.CTS.Dispose();
            }
            _consumerStore.Clear();
        }
    }
}