feat(task): optimize file list retrieval logic and add a caching mechanism
- Fetch the file list by time prefix, with support for hour-level directory lookups (a rough sketch follows this list)
- Add a fallback mechanism: when the time-prefix lookup finds no files, fall back to the per-day directory
- Add a singleton pattern and client connection pooling at the adapter layer
- Add file list caching to the S3 and AliOSS adapters
- Fix cross-day task handling; the business flow is constrained to not support cross-day operations
- Optimize file de-duplication to avoid processing the same file more than once
- Add detailed tracing and error handling
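The hour-prefix listing with a per-day fallback described above is not visible in the hunks below, so here is a minimal sketch of that flow. It is an assumption-heavy illustration: the `File` type, `listFunc`, `getFileListByTime`, and the `<dir>/<yyyy-MM-dd>/<HH>/` path layout are placeholders, not code from this commit.

```go
package adapter

import (
	"context"
	"fmt"
	"time"
)

// File stands in for the project's dto.File.
type File struct {
	Key  string
	Size int64
}

// listFunc stands in for an object-store list-by-prefix call (S3 / AliOSS).
type listFunc func(ctx context.Context, prefix string) ([]File, error)

// getFileListByTime first lists under the hour-level prefix for relDt and
// falls back to the whole day directory when that prefix yields nothing.
func getFileListByTime(ctx context.Context, list listFunc, dir string, relDt time.Time) ([]File, error) {
	hourPrefix := fmt.Sprintf("%s/%s/%s/", dir, relDt.Format("2006-01-02"), relDt.Format("15"))
	files, err := list(ctx, hourPrefix)
	if err != nil {
		return nil, err
	}
	if len(files) > 0 {
		return files, nil
	}
	// Fallback: scan the per-day directory when the hour prefix finds no files.
	dayPrefix := fmt.Sprintf("%s/%s/", dir, relDt.Format("2006-01-02"))
	return list(ctx, dayPrefix)
}
```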
@@ -4,6 +4,7 @@ import (
 	"ZhenTuLocalPassiveAdapter/config"
 	"ZhenTuLocalPassiveAdapter/dto"
 	"context"
+	"sync"
 	"time"
 )
 
@@ -11,18 +12,28 @@ type Adapter interface {
 	GetFileList(ctx context.Context, path string, relDt time.Time) ([]dto.File, error)
 }
 
+var (
+	adapterOnce     sync.Once
+	adapterInstance Adapter
+)
+
 func GetAdapter() Adapter {
-	if config.Config.Record.Storage.Type == "s3" {
-		return &S3Adapter{
-			StorageConfig: config.Config.Record.Storage,
-		}
-	} else if config.Config.Record.Storage.Type == "alioss" {
-		return &AliOSSAdapter{
-			StorageConfig: config.Config.Record.Storage,
-		}
-	} else {
-		return &LocalAdapter{
-			config.Config.Record.Storage,
-		}
-	}
+	adapterOnce.Do(func() {
+		if config.Config.Record.Storage.Type == "s3" {
+			adapterInstance = &S3Adapter{
+				StorageConfig: config.Config.Record.Storage,
+			}
+			return
+		}
+		if config.Config.Record.Storage.Type == "alioss" {
+			adapterInstance = &AliOSSAdapter{
+				StorageConfig: config.Config.Record.Storage,
+			}
+			return
+		}
+		adapterInstance = &LocalAdapter{
+			config.Config.Record.Storage,
+		}
+	})
+	return adapterInstance
 }
 
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"path"
 	"sort"
+	"sync"
 	"time"
 
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
@@ -18,25 +19,44 @@ import (
 )
 
 type AliOSSAdapter struct {
-	StorageConfig config.StorageConfig
-	ossClient     *oss.Client
+	StorageConfig     config.StorageConfig
+	fileListCacheOnce sync.Once
+	fileListCache     *fileListCache
+
+	clientOnce sync.Once
+	clientErr  error
+	ossClient  *oss.Client
 }
 
 func (a *AliOSSAdapter) getClient() (*oss.Client, error) {
-	if a.ossClient == nil {
+	a.clientOnce.Do(func() {
 		client, err := oss.New(
 			a.StorageConfig.AliOSS.Endpoint,
 			a.StorageConfig.AliOSS.AccessKeyId,
 			a.StorageConfig.AliOSS.AccessKeySecret,
 		)
 		if err != nil {
-			return nil, fmt.Errorf("创建阿里云OSS客户端失败: %w", err)
+			a.clientErr = fmt.Errorf("创建阿里云OSS客户端失败: %w", err)
+			return
 		}
 		a.ossClient = client
-	}
+	})
+	if a.clientErr != nil {
+		return nil, a.clientErr
+	}
+	if a.ossClient == nil {
+		return nil, fmt.Errorf("阿里云OSS客户端未初始化")
+	}
 	return a.ossClient, nil
 }
 
+func (a *AliOSSAdapter) getFileListCache() *fileListCache {
+	a.fileListCacheOnce.Do(func() {
+		a.fileListCache = newFileListCache(getFileListCacheTTL(), getFileListCacheMaxEntries())
+	})
+	return a.fileListCache
+}
+
 func (a *AliOSSAdapter) GetFileList(ctx context.Context, dirPath string, relDt time.Time) ([]dto.File, error) {
 	_, span := tracer.Start(ctx, "GetFileList_alioss")
 	defer span.End()
@@ -50,7 +70,7 @@ func (a *AliOSSAdapter) GetFileList(ctx context.Context, dirPath string, relDt t
 	}
 
 	cacheKey := fmt.Sprintf("%s_%s", dirPath, relDt.Format("2006-01-02"))
-	fileListCache := getAliOssFileListCache()
+	fileListCache := a.getFileListCache()
 	if cachedFiles, ok := fileListCache.Get(cacheKey); ok {
 		logger.Debug("获取已缓存列表", zap.String("cacheKey", cacheKey))
 		span.SetAttributes(attribute.Bool("cache.hit", true))
@@ -18,28 +18,6 @@ const (
 	fileListCacheCleanupInterval = 1 * time.Minute
 )
 
-var (
-	s3FileListCacheOnce     sync.Once
-	s3FileListCacheInstance *fileListCache
-
-	aliOssFileListCacheOnce     sync.Once
-	aliOssFileListCacheInstance *fileListCache
-)
-
-func getS3FileListCache() *fileListCache {
-	s3FileListCacheOnce.Do(func() {
-		s3FileListCacheInstance = newFileListCache(getFileListCacheTTL(), getFileListCacheMaxEntries())
-	})
-	return s3FileListCacheInstance
-}
-
-func getAliOssFileListCache() *fileListCache {
-	aliOssFileListCacheOnce.Do(func() {
-		aliOssFileListCacheInstance = newFileListCache(getFileListCacheTTL(), getFileListCacheMaxEntries())
-	})
-	return aliOssFileListCacheInstance
-}
-
 func getFileListCacheTTL() time.Duration {
 	ttlSeconds := config.Config.Record.Cache.FileListTTLSeconds
 	if ttlSeconds <= 0 {
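The `fileListCache` constructed by `newFileListCache(getFileListCacheTTL(), getFileListCacheMaxEntries())` is not itself part of this diff. A rough sketch of a TTL- and size-bounded cache that matches how it is used here (`Get` by string key) follows; the internals, the `Set` method, and the lazy expiry are assumptions, and the real implementation also runs a periodic cleanup per `fileListCacheCleanupInterval`.

```go
package adapter

import (
	"sync"
	"time"
)

// cacheEntry holds one cached listing; plain object keys stand in for the
// project's []dto.File here.
type cacheEntry struct {
	keys      []string
	expiresAt time.Time
}

// fileListCache is a mutex-guarded map with a TTL and an entry limit.
type fileListCache struct {
	mu         sync.Mutex
	ttl        time.Duration
	maxEntries int
	entries    map[string]cacheEntry
}

func newFileListCache(ttl time.Duration, maxEntries int) *fileListCache {
	return &fileListCache{ttl: ttl, maxEntries: maxEntries, entries: make(map[string]cacheEntry)}
}

// Get returns the cached listing for key if it has not expired yet.
func (c *fileListCache) Get(key string) ([]string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.entries[key]
	if !ok || time.Now().After(e.expiresAt) {
		delete(c.entries, key)
		return nil, false
	}
	return e.keys, true
}

// Set stores a listing under key, evicting an arbitrary entry when full.
func (c *fileListCache) Set(key string, keys []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.entries) >= c.maxEntries {
		for k := range c.entries {
			delete(c.entries, k)
			break
		}
	}
	c.entries[key] = cacheEntry{keys: keys, expiresAt: time.Now().Add(c.ttl)}
}
```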
@@ -11,6 +11,7 @@ import (
 	"go.uber.org/zap"
 	"path"
 	"sort"
+	"sync"
 	"time"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
@@ -20,12 +21,17 @@ import (
 )
 
 type S3Adapter struct {
-	StorageConfig config.StorageConfig
-	s3Client      *s3.Client
+	StorageConfig     config.StorageConfig
+	fileListCacheOnce sync.Once
+	fileListCache     *fileListCache
+
+	clientOnce sync.Once
+	clientErr  error
+	s3Client   *s3.Client
 }
 
 func (s *S3Adapter) getClient() (*s3.Client, error) {
-	if s.s3Client == nil {
+	s.clientOnce.Do(func() {
 		const defaultRegion = "us-east-1"
 		resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
 			return aws.Endpoint{
@@ -42,10 +48,26 @@ func (s *S3Adapter) getClient() (*s3.Client, error) {
 			EndpointResolver: resolver,
 		}
 		s.s3Client = s3.NewFromConfig(cfg)
-	}
+		if s.s3Client == nil {
+			s.clientErr = fmt.Errorf("创建S3客户端失败")
+		}
+	})
+	if s.clientErr != nil {
+		return nil, s.clientErr
+	}
+	if s.s3Client == nil {
+		return nil, fmt.Errorf("S3客户端未初始化")
+	}
 	return s.s3Client, nil
 }
 
+func (s *S3Adapter) getFileListCache() *fileListCache {
+	s.fileListCacheOnce.Do(func() {
+		s.fileListCache = newFileListCache(getFileListCacheTTL(), getFileListCacheMaxEntries())
+	})
+	return s.fileListCache
+}
+
 func (s *S3Adapter) GetFileList(ctx context.Context, dirPath string, relDt time.Time) ([]dto.File, error) {
 	_, span := tracer.Start(ctx, "GetFileList_s3")
 	defer span.End()
@@ -59,7 +81,7 @@ func (s *S3Adapter) GetFileList(ctx context.Context, dirPath string, relDt time.
 	}
 
 	cacheKey := fmt.Sprintf("%s_%s", dirPath, relDt.Format("2006-01-02"))
-	fileListCache := getS3FileListCache()
+	fileListCache := s.getFileListCache()
 	if cachedFiles, ok := fileListCache.Get(cacheKey); ok {
 		logger.Debug("获取已缓存列表", zap.String("cacheKey", cacheKey))
 		span.SetAttributes(attribute.Bool("cache.hit", true))