feat(config): add file-list cache configuration and rework the AliOSS and S3 adapter caches

- Add a CacheConfig struct defining the TTL and maximum entry count of the file-list cache
- Wire the Cache settings into RecordConfig
- Give the AliOSS and S3 adapters a unified file-list caching mechanism
- Remove the old sync.Map cache and its periodic cleanup logic
- Introduce the go-cache dependency for cache management
- Bound the cache size with an LRU policy to avoid memory leaks
- Use singleflight for cache-penetration protection and concurrency control (see the sketch after this list)
- Add the cache-related entries to the configuration file
- Ignore .exe files in .gitignore
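The fileListCache helper (getS3FileListCache() and its Get/GetOrLoad methods) is referenced by the adapter but not included in this hunk. Below is a minimal sketch of what such a helper could look like, built on go-cache for TTL expiry (assuming the commonly used github.com/patrickmn/go-cache) and golang.org/x/sync/singleflight to collapse concurrent loads. The package name, the generic ListCache type, and the constructor are assumptions; the (value, hit, shared, err) return shape mirrors the call sites in the diff, and the LRU size bound mentioned in the commit message is omitted here.

package fscache // hypothetical location; the real helper is not part of this hunk

import (
    "time"

    gocache "github.com/patrickmn/go-cache"
    "golang.org/x/sync/singleflight"
)

// ListCache caches loaded values per key with a TTL and collapses
// concurrent loads of the same key through singleflight.
type ListCache[T any] struct {
    store *gocache.Cache
    group singleflight.Group
    ttl   time.Duration
}

// NewListCache builds a cache whose entries expire after ttl; go-cache
// sweeps expired entries in the background at the given cleanup interval.
func NewListCache[T any](ttl time.Duration) *ListCache[T] {
    return &ListCache[T]{
        store: gocache.New(ttl, 2*ttl),
        ttl:   ttl,
    }
}

// Get returns the cached value for key if it is present and not expired.
func (c *ListCache[T]) Get(key string) (T, bool) {
    if v, ok := c.store.Get(key); ok {
        return v.(T), true
    }
    var zero T
    return zero, false
}

// GetOrLoad returns the cached value, or runs load exactly once per key
// across concurrent callers and caches the result. hit reports whether the
// value came straight from the cache; shared reports whether this caller
// reused a load started by another goroutine.
func (c *ListCache[T]) GetOrLoad(key string, load func() (T, error)) (value T, hit bool, shared bool, err error) {
    if v, ok := c.Get(key); ok {
        return v, true, false, nil
    }
    res, err, shared := c.group.Do(key, func() (interface{}, error) {
        v, err := load()
        if err != nil {
            return nil, err
        }
        c.store.Set(key, v, c.ttl)
        return v, nil
    })
    if err != nil {
        var zero T
        return zero, false, shared, err
    }
    return res.(T), false, shared, nil
}

Under this shape, the adapter would construct the cache once (for example from the TTL in config.Config.Record.Cache) and call Get and GetOrLoad exactly as in the hunk below.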
fs/s3_adapter.go (208 lines changed)
@@ -11,7 +11,6 @@ import (
     "go.uber.org/zap"
     "path"
     "sort"
-    "sync"
     "time"
 
     "github.com/aws/aws-sdk-go-v2/aws"
@@ -20,8 +19,6 @@ import (
     "go.opentelemetry.io/otel/codes"
 )
 
-var s3Cache sync.Map
-
 type S3Adapter struct {
     StorageConfig config.StorageConfig
     s3Client      *s3.Client
@@ -62,158 +59,99 @@ func (s *S3Adapter) GetFileList(ctx context.Context, dirPath string, relDt time.
     }
 
     cacheKey := fmt.Sprintf("%s_%s", dirPath, relDt.Format("2006-01-02"))
-    if cachedInterface, ok := s3Cache.Load(cacheKey); ok {
-        cachedItem := cachedInterface.(cacheItem)
-        logger.Debug("缓存过期时间", zap.Duration("expiresIn", cachedItem.expires.Sub(time.Now())))
-        if time.Now().Before(cachedItem.expires) {
-            logger.Debug("获取已缓存列表", zap.String("cacheKey", cacheKey))
-            span.SetAttributes(attribute.Bool("cache.hit", true))
-            return cachedItem.data, nil
-        }
+    fileListCache := getS3FileListCache()
+    if cachedFiles, ok := fileListCache.Get(cacheKey); ok {
+        logger.Debug("获取已缓存列表", zap.String("cacheKey", cacheKey))
+        span.SetAttributes(attribute.Bool("cache.hit", true))
+        span.SetAttributes(attribute.Int("file.count", len(cachedFiles)))
+        span.SetStatus(codes.Ok, "文件读取成功")
+        return cachedFiles, nil
     }
 
-    mutexKey := fmt.Sprintf("lock_%s", cacheKey)
-    mutex, _ := s3Cache.LoadOrStore(mutexKey, &sync.Mutex{})
-    lock := mutex.(*sync.Mutex)
-    defer func() {
-        // 解锁后删除锁(避免内存泄漏)
-        s3Cache.Delete(mutexKey)
-        lock.Unlock()
-    }()
-    lock.Lock()
-
-    if cachedInterface, ok := s3Cache.Load(cacheKey); ok {
-        cachedItem := cachedInterface.(cacheItem)
-        logger.Debug("缓存过期时间", zap.Duration("expiresIn", cachedItem.expires.Sub(time.Now())))
-        if time.Now().Before(cachedItem.expires) {
-            logger.Debug("过锁后获取已缓存列表", zap.String("cacheKey", cacheKey))
-            span.SetAttributes(attribute.Bool("s3Cache.hit", true))
-            return cachedItem.data, nil
-        }
-    }
-
-    listObjectsInput := &s3.ListObjectsV2Input{
-        Bucket:  aws.String(s.StorageConfig.S3.Bucket),
-        Prefix:  aws.String(path.Join(s.StorageConfig.S3.Prefix, dirPath)),
-        MaxKeys: aws.Int32(1000),
-    }
-
-    client, err := s.getClient()
-    if err != nil {
-        span.SetAttributes(attribute.String("error", err.Error()))
-        span.SetStatus(codes.Error, "创建S3客户端失败")
-        return nil, err
-    }
-
-    var fileList []dto.File
-    var continuationToken *string
-
-    for {
-        if continuationToken != nil {
-            listObjectsInput.ContinuationToken = continuationToken
-        }
-
-        result, err := client.ListObjectsV2(context.TODO(), listObjectsInput)
-        if err != nil {
-            span.SetAttributes(attribute.String("error", err.Error()))
-            span.SetStatus(codes.Error, "文件列表读取失败")
-            return nil, err
-        }
-
-        for _, object := range result.Contents {
-            key := *object.Key
-            if !util.IsVideoFile(path.Base(key)) {
-                continue
-            }
-            startTime, stopTime, err := util.ParseStartStopTime(path.Base(key), relDt)
-            if err != nil {
-                continue
-            }
-            if stopTime.IsZero() {
-                stopTime = startTime
-            }
-            if startTime.Equal(stopTime) {
-                stopTime = stopTime.Add(time.Second * time.Duration(config.Config.Record.Duration))
-            }
-            presignClient := s3.NewPresignClient(client)
-            request, err := presignClient.PresignGetObject(context.TODO(), &s3.GetObjectInput{
-                Bucket: aws.String(s.StorageConfig.S3.Bucket),
-                Key:    aws.String(key),
-            }, func(presignOptions *s3.PresignOptions) {
-                presignOptions.Expires = 10 * time.Minute
-            })
-            if err != nil {
-                span.SetAttributes(attribute.String("error", err.Error()))
-                span.SetStatus(codes.Error, "生成预签名URL失败")
-                logger.Error("生成预签名URL失败", zap.Error(err))
-                continue
-            }
-            fileList = append(fileList, dto.File{
-                BasePath:  s.StorageConfig.S3.Bucket,
-                Name:      path.Base(key),
-                Path:      path.Dir(key),
-                Url:       request.URL,
-                StartTime: startTime,
-                EndTime:   stopTime,
-            })
-        }
-
-        if !*result.IsTruncated {
-            break
-        }
-        continuationToken = result.NextContinuationToken
-    }
-
+    fileList, hit, shared, err := fileListCache.GetOrLoad(cacheKey, func() ([]dto.File, error) {
+        listObjectsInput := &s3.ListObjectsV2Input{
+            Bucket:  aws.String(s.StorageConfig.S3.Bucket),
+            Prefix:  aws.String(path.Join(s.StorageConfig.S3.Prefix, dirPath)),
+            MaxKeys: aws.Int32(1000),
+        }
+
+        client, err := s.getClient()
+        if err != nil {
+            span.SetAttributes(attribute.String("error", err.Error()))
+            span.SetStatus(codes.Error, "文件列表读取失败")
+            return nil, err
+        }
+
+        var resultFiles []dto.File
+        var continuationToken *string
+
+        for {
+            if continuationToken != nil {
+                listObjectsInput.ContinuationToken = continuationToken
+            }
+
+            result, err := client.ListObjectsV2(context.TODO(), listObjectsInput)
+            if err != nil {
+                return nil, err
+            }
+
+            for _, object := range result.Contents {
+                key := *object.Key
+                if !util.IsVideoFile(path.Base(key)) {
+                    continue
+                }
+                startTime, stopTime, err := util.ParseStartStopTime(path.Base(key), relDt)
+                if err != nil {
+                    continue
+                }
+                if stopTime.IsZero() {
+                    stopTime = startTime
+                }
+                if startTime.Equal(stopTime) {
+                    stopTime = stopTime.Add(time.Second * time.Duration(config.Config.Record.Duration))
+                }
+                presignClient := s3.NewPresignClient(client)
+                request, err := presignClient.PresignGetObject(context.TODO(), &s3.GetObjectInput{
+                    Bucket: aws.String(s.StorageConfig.S3.Bucket),
+                    Key:    aws.String(key),
+                }, func(presignOptions *s3.PresignOptions) {
+                    presignOptions.Expires = 10 * time.Minute
+                })
+                if err != nil {
+                    logger.Error("生成预签名URL失败", zap.Error(err))
+                    continue
+                }
+                resultFiles = append(resultFiles, dto.File{
+                    BasePath:  s.StorageConfig.S3.Bucket,
+                    Name:      path.Base(key),
+                    Path:      path.Dir(key),
+                    Url:       request.URL,
+                    StartTime: startTime,
+                    EndTime:   stopTime,
+                })
+            }
+
+            if !*result.IsTruncated {
+                break
+            }
+            continuationToken = result.NextContinuationToken
+        }
+
+        sort.Slice(resultFiles, func(i, j int) bool {
+            return resultFiles[i].StartTime.Before(resultFiles[j].StartTime)
+        })
+        return resultFiles, nil
+    })
+    if err != nil {
+        span.SetAttributes(attribute.String("error", err.Error()))
+        span.SetStatus(codes.Error, "文件列表读取失败")
+        return nil, err
+    }
+
+    span.SetAttributes(attribute.Bool("cache.shared", shared))
     span.SetAttributes(attribute.Int("file.count", len(fileList)))
-    sort.Slice(fileList, func(i, j int) bool {
-        return fileList[i].StartTime.Before(fileList[j].StartTime)
-    })
     span.SetStatus(codes.Ok, "文件读取成功")
-
-    cacheItem := cacheItem{
-        data:    fileList,
-        expires: time.Now().Add(30 * time.Second),
-    }
-    s3Cache.Store(cacheKey, cacheItem)
-    logger.Debug("缓存文件列表", zap.String("cacheKey", cacheKey))
+    if !hit && !shared {
+        logger.Debug("缓存文件列表", zap.String("cacheKey", cacheKey))
+    }
 
     return fileList, nil
 }
-
-type cacheItem struct {
-    data    []dto.File
-    expires time.Time
-}
-
-// 添加定时清理缓存的初始化函数
-func init() {
-    go func() {
-        ticker := time.NewTicker(1 * time.Minute)
-        defer ticker.Stop()
-        for {
-            select {
-            case <-ticker.C:
-                cleanupCache()
-            }
-        }
-    }()
-}
-
-// 添加缓存清理函数
-func cleanupCache() {
-    var keysToDelete []interface{}
-    s3Cache.Range(func(key, value interface{}) bool {
-        // 类型检查:跳过非 cacheItem 类型的值(例如 lock_xxx 对应的 *sync.Mutex)
-        item, ok := value.(cacheItem)
-        if !ok {
-            return true
-        }
-        if time.Now().After(item.expires) {
-            keysToDelete = append(keysToDelete, key)
-        }
-        return true
-    })
-    for _, key := range keysToDelete {
-        s3Cache.Delete(key)
-    }
-}
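The CacheConfig and RecordConfig changes named in the commit message are also outside this excerpt. A sketch of the shape they describe follows; the field names, yaml tags, and the seconds unit are assumptions, and only Duration is confirmed by the config.Config.Record.Duration reference in the diff above.

// CacheConfig holds the file-list cache settings described in the commit
// message: entry TTL and a maximum number of cached listings.
type CacheConfig struct {
    TTL        int `yaml:"ttl"`         // cache TTL in seconds (assumed unit)
    MaxEntries int `yaml:"max_entries"` // upper bound on cached file lists
}

// RecordConfig gains a Cache section alongside the existing Duration field.
type RecordConfig struct {
    Duration int         `yaml:"duration"`
    Cache    CacheConfig `yaml:"cache"`
}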