Compare commits

...

9 Commits

SHA1 Message Date
c2ce6f91ed Logging adjustments 2025-06-13 16:55:11 +08:00
da3de2cc89 Cache TTL adjustments 2025-06-13 15:36:29 +08:00
d245d09837 Support skipping file deletion for specific scenic areas; skip duplicate adapters 2025-06-13 15:36:29 +08:00
4d8236afa1 Use identity to decide whether adapters are the same 2025-06-13 15:35:11 +08:00
04e2ade669 Add a lock to avoid high-concurrency conflicts 2025-06-13 15:35:11 +08:00
cd4678120d Fix deletion logic 2025-06-13 15:35:11 +08:00
cf9802f9ec Add back-office order filtering 2025-06-10 16:46:18 +08:00
a84b38dab5 Broker statistics 2025-06-10 16:46:18 +08:00
e32f231a8f Devices support custom calibration regions 2025-06-10 16:46:18 +08:00
22 changed files with 123 additions and 191 deletions

View File

@@ -61,4 +61,5 @@ public class DeviceConfigEntity {
private Integer imageFree;
private Integer videoFree;
private Long pairDevice;
private String videoCrop;
}

View File

@@ -38,4 +38,6 @@ public class FaceSampleReqQuery extends BaseQueryParameterReq {
private Date startTime;
@JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8")
private Date endTime;
private String matchSampleIds;
}

View File

@@ -31,7 +31,7 @@ public class DeviceRepository {
}
DeviceEntity device = deviceMapper.getByDeviceId(deviceId);
if (null != device) {
redisTemplate.opsForValue().set(String.format(DEVICE_CACHE_KEY, deviceId), JSONObject.toJSONString(device));
redisTemplate.opsForValue().set(String.format(DEVICE_CACHE_KEY, deviceId), JSONObject.toJSONString(device), 3, TimeUnit.DAYS);
}
return device;
}
@@ -45,8 +45,8 @@ public class DeviceRepository {
device = deviceMapper.getByDeviceNo2(deviceNo);
}
if (null != device) {
redisTemplate.opsForValue().set(String.format(DEVICE_CACHE_KEY, deviceNo), JSONObject.toJSONString(device));
redisTemplate.opsForValue().set(String.format(DEVICE_CACHE_KEY, device.getId()), JSONObject.toJSONString(device));
redisTemplate.opsForValue().set(String.format(DEVICE_CACHE_KEY, deviceNo), JSONObject.toJSONString(device), 3, TimeUnit.DAYS);
redisTemplate.opsForValue().set(String.format(DEVICE_CACHE_KEY, device.getId()), JSONObject.toJSONString(device), 3, TimeUnit.DAYS);
} else {
redisTemplate.opsForValue().set(String.format(DEVICE_CACHE_KEY, deviceNo), "null", 60L, TimeUnit.SECONDS);
}
@@ -64,7 +64,7 @@ public class DeviceRepository {
deviceConfig.setDeviceId(deviceId);
deviceMapper.addConfig(deviceConfig);
}
redisTemplate.opsForValue().set(String.format(DEVICE_CONFIG_CACHE_KEY, deviceId), JSONObject.toJSONString(deviceConfig));
redisTemplate.opsForValue().set(String.format(DEVICE_CONFIG_CACHE_KEY, deviceId), JSONObject.toJSONString(deviceConfig), 3, TimeUnit.DAYS);
return deviceConfig;
}
@@ -111,7 +111,7 @@ public class DeviceRepository {
device.setOnline(online);
device.setKeepaliveAt(keepaliveAt);
device.setIpAddr(ipAddr);
redisTemplate.opsForValue().set(String.format(DEVICE_ONLINE_CACHE_KEY, deviceId), JSONObject.toJSONString(device), 2, TimeUnit.DAYS);
redisTemplate.opsForValue().set(String.format(DEVICE_ONLINE_CACHE_KEY, deviceId), JSONObject.toJSONString(device), 3, TimeUnit.DAYS);
// deviceMapper.updateOnlineStatus(deviceId, ipAddr, online, keepaliveAt);
updateDeviceCache(device);
}
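Note: every cache write in this file now carries an explicit expiry (3 days for device and config entries, 60 seconds for negative lookups), where the old lines used TTL-less set calls that could leave stale device JSON in Redis indefinitely. A minimal sketch of the pattern, assuming a RedisTemplate<String, String> and fastjson as in the diff; the cacheJson helper is hypothetical, not part of the change:

    import com.alibaba.fastjson.JSONObject;
    import org.springframework.data.redis.core.RedisTemplate;
    import java.util.concurrent.TimeUnit;

    // Sketch: always write cache entries with a TTL so they are self-cleaning.
    public class CacheWriter {
        private final RedisTemplate<String, String> redisTemplate;

        public CacheWriter(RedisTemplate<String, String> redisTemplate) {
            this.redisTemplate = redisTemplate;
        }

        // Equivalent of the changed lines above, e.g.
        // cacheJson(String.format(DEVICE_CACHE_KEY, deviceId), device, 3, TimeUnit.DAYS);
        public void cacheJson(String key, Object value, long timeout, TimeUnit unit) {
            redisTemplate.opsForValue().set(key, JSONObject.toJSONString(value), timeout, unit);
        }
    }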

View File

@@ -34,7 +34,7 @@ public class FaceRepository {
}
FaceEntity face = faceMapper.get(id);
if (face != null) {
redisTemplate.opsForValue().set(String.format(FACE_CACHE_KEY, id), JSONObject.toJSONString(face), 60 * 60 * 24L, TimeUnit.SECONDS);
redisTemplate.opsForValue().set(String.format(FACE_CACHE_KEY, id), JSONObject.toJSONString(face), 12, TimeUnit.HOURS);
}
return face;
}

View File

@@ -8,6 +8,7 @@ import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;
import java.math.BigDecimal;
import java.util.concurrent.TimeUnit;
@Component
public class PriceRepository {
@@ -27,8 +28,8 @@ public class PriceRepository {
if (priceConfigEntity == null) {
priceConfigEntity = mapper.getPriceByScenicTypeGoods(scenicId, type, goodsId);
if (priceConfigEntity != null) {
redisTemplate.opsForValue().set(cacheKey, JSON.toJSONString(priceConfigEntity));
redisTemplate.opsForValue().set(String.format(PRICE_ID_CACHE, priceConfigEntity.getId()), JSON.toJSONString(priceConfigEntity));
redisTemplate.opsForValue().set(cacheKey, JSON.toJSONString(priceConfigEntity), 12, TimeUnit.HOURS);
redisTemplate.opsForValue().set(String.format(PRICE_ID_CACHE, priceConfigEntity.getId()), JSON.toJSONString(priceConfigEntity), 12, TimeUnit.HOURS);
}
}
return priceConfigEntity;

View File

@@ -11,6 +11,7 @@ import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;
import java.math.BigDecimal;
import java.util.concurrent.TimeUnit;
@Component
public class VideoRepository {
@@ -28,8 +29,8 @@ public class VideoRepository {
}
VideoEntity video = videoMapper.getEntity(videoId);
if (video != null) {
redisTemplate.opsForValue().set(String.format(VIDEO_CACHE_KEY, videoId), JSON.toJSONString(video));
redisTemplate.opsForValue().set(String.format(VIDEO_BY_TASK_ID_CACHE_KEY, video.getTaskId()), JSON.toJSONString(video));
redisTemplate.opsForValue().set(String.format(VIDEO_CACHE_KEY, videoId), JSON.toJSONString(video), 12, TimeUnit.HOURS);
redisTemplate.opsForValue().set(String.format(VIDEO_BY_TASK_ID_CACHE_KEY, video.getTaskId()), JSON.toJSONString(video), 12, TimeUnit.HOURS);
}
return video;
}
@@ -40,8 +41,8 @@ public class VideoRepository {
}
VideoEntity video = videoMapper.findByTaskId(taskId);
if (video != null) {
redisTemplate.opsForValue().set(String.format(VIDEO_BY_TASK_ID_CACHE_KEY, taskId), JSON.toJSONString(video));
redisTemplate.opsForValue().set(String.format(VIDEO_CACHE_KEY, video.getId()), JSON.toJSONString(video));
redisTemplate.opsForValue().set(String.format(VIDEO_BY_TASK_ID_CACHE_KEY, taskId), JSON.toJSONString(video), 12, TimeUnit.HOURS);
redisTemplate.opsForValue().set(String.format(VIDEO_CACHE_KEY, video.getId()), JSON.toJSONString(video), 12, TimeUnit.HOURS);
}
return video;
}

View File

@@ -31,7 +31,7 @@ public class VideoTaskRepository {
} else {
TaskEntity task = taskMapper.get(taskId);
if (task != null && 1 == task.getStatus()) {
redisTemplate.opsForValue().set(String.format(TASK_CACHE_KEY, taskId), JSONObject.toJSONString(task), 60 * 60 * 24L, TimeUnit.SECONDS);
redisTemplate.opsForValue().set(String.format(TASK_CACHE_KEY, taskId), JSONObject.toJSONString(task), 12, TimeUnit.HOURS);
}
return task;
}

View File

@@ -35,6 +35,11 @@ import java.util.stream.Collectors;
final public class AliOssAdapter extends AStorageAdapter {
private AliOssStorageConfig config;
@Override
public String identity() {
return config.identity();
}
@Override
public void loadConfig(Map<String, String> _config) {
AliOssStorageConfig config = new AliOssStorageConfig();

View File

@@ -28,6 +28,11 @@ import java.util.stream.Collectors;
public class AwsOssAdapter extends AStorageAdapter {
private AwsOssStorageConfig config;
@Override
public String identity() {
return config.identity();
}
@Override
public void loadConfig(Map<String, String> _config) {
AwsOssStorageConfig config = new AwsOssStorageConfig();

View File

@@ -12,6 +12,7 @@ import java.util.List;
import java.util.Map;
public interface IStorageAdapter {
String identity();
void loadConfig(Map<String, String> config);
void setConfig(StorageConfig config);
String uploadFile(String contentType, InputStream inputStream, String ...path);
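The new identity() method gives each adapter a fingerprint of its underlying storage target; the cleanup job later in this diff uses it to process each distinct target once, even when several scenic areas share one bucket. A minimal sketch of that dedup idea (names other than IStorageAdapter.identity() are hypothetical; project imports omitted):

    import java.util.HashSet;
    import java.util.Set;
    import java.util.function.Consumer;

    // Sketch: run an action once per distinct storage target.
    public final class AdapterDedup {
        public static void forEachDistinct(Iterable<IStorageAdapter> adapters,
                                           Consumer<IStorageAdapter> action) {
            Set<String> seen = new HashSet<>();
            for (IStorageAdapter adapter : adapters) {
                if (seen.add(adapter.identity())) { // add() returns false if already seen
                    action.accept(adapter);
                }
            }
        }
    }

A Set makes the intent slightly clearer than the ArrayList plus contains() used in FaceCleaner below, but the behavior is the same.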

View File

@@ -11,6 +11,11 @@ import java.util.List;
import java.util.Map;
public class LocalStorageAdapter extends AStorageAdapter{
@Override
public String identity() {
return "";
}
@Override
public void loadConfig(Map<String, String> config) {

View File

@@ -1,5 +1,6 @@
package com.ycwl.basic.storage.entity;
import cn.hutool.crypto.digest.MD5;
import lombok.Data;
import lombok.EqualsAndHashCode;
@@ -19,6 +20,11 @@ public class AliOssStorageConfig extends StorageConfig {
// TODO: verify that the configuration is correct
}
@Override
public String identity() {
return MD5.create().digestHex16(accessKeyId + bucketName + prefix);
}
public String getUrl() {
String url = this.url;
if (url == null) {

View File

@@ -1,5 +1,6 @@
package com.ycwl.basic.storage.entity;
import cn.hutool.crypto.digest.MD5;
import lombok.Data;
import lombok.EqualsAndHashCode;
@@ -19,6 +20,11 @@ public class AwsOssStorageConfig extends StorageConfig {
// TODO: verify that the configuration is correct
}
@Override
public String identity() {
return MD5.create().digestHex16(accessKeyId + bucketName + prefix);
}
public String getUrl() {
String url = this.url;
if (url == null) {

View File

@@ -2,4 +2,5 @@ package com.ycwl.basic.storage.entity;
public abstract class StorageConfig {
public abstract void checkEverythingOK();
public abstract String identity();
}

View File

@@ -33,11 +33,13 @@ import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Objects;
import static com.ycwl.basic.constant.FaceConstant.USER_FACE_DB_NAME;
import static com.ycwl.basic.constant.StorageConstant.VIID_FACE;
@Component
@EnableScheduling
@@ -58,6 +60,7 @@ public class FaceCleaner {
private FaceMapper faceMapper;
@Autowired
private ScenicService scenicService;
public static final List<String> disableDeleteScenicIds = List.of("3955650120997015552");
@Scheduled(cron = "0 0 1 * * ?")
public void deleteExpireSample(){
@@ -217,9 +220,9 @@
}
private void cleanFaceSampleOss() {
log.info("Starting face file cleanup");
List<FaceSampleRespVO> faceSampleRespVOS = faceSampleMapper.list(new FaceSampleReqQuery());
List<FaceSampleRespVO> list = faceSampleMapper.list(new FaceSampleReqQuery());
IStorageAdapter adapter = StorageFactory.use("faces");
List<StorageFileObject> fileObjectList = adapter.listDir("user-face");
List<StorageFileObject> fileObjectList = adapter.listDir(VIID_FACE);
fileObjectList.parallelStream().forEach(fileObject -> {
if (fileObject.getModifyTime() != null) {
// Skip files modified within the last day
@@ -227,7 +230,7 @@
return;
}
}
if(faceSampleRespVOS.parallelStream().noneMatch(faceSampleRespVO -> faceSampleRespVO.getFaceUrl().contains(fileObject.getName()))){
if(list.parallelStream().noneMatch(faceSampleRespVO -> faceSampleRespVO.getFaceUrl().contains(fileObject.getName()))){
log.info("删除人脸文件:{}", fileObject);
adapter.deleteFile(fileObject.getFullPath());
}
@@ -236,8 +239,20 @@
public void cleanSourceOss() {
log.info("Starting source video material cleanup");
List<SourceRespVO> list = sourceMapper.list(new SourceReqQuery());
ArrayList<String> adapterIdentity = new ArrayList<>();
scenicMapper.list(new ScenicReqQuery()).forEach(scenic -> {
if (disableDeleteScenicIds.contains(scenic.getId().toString())) {
log.info("景区【{}】禁止删除文件,跳过!", scenic.getName());
return;
}
IStorageAdapter adapter = scenicService.getScenicStorageAdapter(scenic.getId());
String identity = adapter.identity();
if (!adapterIdentity.contains(identity)) {
adapterIdentity.add(identity);
} else {
log.info("Adapter identity already seen, skipping");
return;
}
log.info("开始清理视频文件");
List<StorageFileObject> fileObjectList = adapter.listDir(StorageConstant.VIDEO_PIECE_PATH);
fileObjectList.parallelStream().forEach(fileObject -> {
@@ -275,8 +290,20 @@
public void cleanVideoOss() {
log.info("Starting video file cleanup");
List<VideoRespVO> list = videoMapper.list(new VideoReqQuery());
ArrayList<String> adapterIdentity = new ArrayList<>();
scenicMapper.list(new ScenicReqQuery()).forEach(scenic -> {
if (disableDeleteScenicIds.contains(scenic.getId().toString())) {
log.info("景区【{}】禁止删除文件,跳过!", scenic.getName());
return;
}
IStorageAdapter adapter = scenicService.getScenicStorageAdapter(scenic.getId());
String identity = adapter.identity();
if (!adapterIdentity.contains(identity)) {
adapterIdentity.add(identity);
} else {
log.info("因为Identity相同,跳过");
return;
}
log.info("开始清理视频文件");
List<StorageFileObject> fileObjectList = adapter.listDir(StorageConstant.VLOG_PATH);
fileObjectList.parallelStream().forEach(fileObject -> {
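All three cleanup methods share one shape: list the objects in storage, skip anything modified recently, and delete objects that no database row references. A compact sketch of that loop under the same assumptions as the diff (IStorageAdapter and StorageFileObject come from this repo; the one-day window and the isReferenced predicate are stand-ins for the truncated checks above):

    import java.util.List;
    import java.util.function.Predicate;

    // Sketch: delete storage objects that are old enough and unreferenced.
    public final class OrphanCleaner {
        public static void clean(IStorageAdapter adapter, String dir,
                                 Predicate<StorageFileObject> isReferenced) {
            List<StorageFileObject> files = adapter.listDir(dir);
            files.parallelStream().forEach(file -> {
                if (file.getModifyTime() != null
                        && file.getModifyTime().getTime() > System.currentTimeMillis() - 24L * 60 * 60 * 1000) {
                    return; // modified within the last day; may still be in use
                }
                if (!isReferenced.test(file)) {
                    adapter.deleteFile(file.getFullPath());
                }
            });
        }
    }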

View File

@@ -23,6 +23,7 @@ import com.ycwl.basic.utils.SnowFlakeUtil;
import com.ycwl.basic.utils.VideoReUploader;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
@@ -288,6 +289,9 @@
sourceEntity.setUrl(imgSource.getUrl());
sourceEntity.setPosJson(imgSource.getPosJson());
}
if (StringUtils.isNotBlank(config.getVideoCrop())) {
sourceEntity.setPosJson(config.getVideoCrop());
}
sourceEntity.setVideoUrl(url);
sourceEntity.setFaceSampleId(faceSampleId);
sourceEntity.setScenicId(device.getScenicId());
@@ -314,6 +318,9 @@
videoReUploader.addTask(sourceEntity.getId());
} else {
source.setVideoUrl(url);
if (StringUtils.isNotBlank(config.getVideoCrop())) {
source.setPosJson(config.getVideoCrop());
}
sourceMapper.update(source);
videoReUploader.addTask(source.getId());
}
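Both branches above apply the same rule: a non-blank videoCrop on the device config overrides the posJson taken from the image source, so a per-device calibration region always wins. As a sketch, the precedence could be factored into one helper (applyCrop is hypothetical; getVideoCrop() is the new field from this diff):

    import org.apache.commons.lang3.StringUtils;

    // Sketch: a device-level crop overrides the source's own position JSON.
    public final class CropOverride {
        private CropOverride() {}

        public static String applyCrop(DeviceConfigEntity config, String posJsonFromSource) {
            if (config != null && StringUtils.isNotBlank(config.getVideoCrop())) {
                return config.getVideoCrop();
            }
            return posJsonFromSource;
        }
    }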

View File

@@ -8,6 +8,7 @@ import java.io.InputStream;
import java.util.Date;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;
public class WxMpUtil {
private static final String GET_WXA_CODE_URL = "https://api.weixin.qq.com/wxa/getwxacode?access_token=%s";
@@ -16,25 +17,31 @@
public static final String GET_USER_PHONE_URL = "https://api.weixin.qq.com/wxa/business/getuserphonenumber?access_token=%s";
private static final Map<String, String> tokens = new ConcurrentHashMap<>();
private static final Map<String, Date> expireTimes = new ConcurrentHashMap<>();
private static final ReentrantLock lock = new ReentrantLock();
private static String getAccessToken(String appId, String appSecret) {
if (expireTimes.containsKey(appId)) {
Date expireTime = expireTimes.get(appId);
if (expireTime.getTime() < System.currentTimeMillis()) {
lock.lock();
try {
if (expireTimes.containsKey(appId)) {
Date expireTime = expireTimes.get(appId);
if (expireTime.getTime() < System.currentTimeMillis()) {
tokens.remove(appId);
}
} else {
tokens.remove(appId);
}
} else {
tokens.remove(appId);
return tokens.computeIfAbsent(appId, (k) -> {
String url = String.format(ACCESS_TOKEN_URL, appId, appSecret);
String response = HttpUtil.get(url);
JSONObject jsonObject = JSONObject.parseObject(response);
String token = jsonObject.getString("access_token");
Date expireTime = new Date(System.currentTimeMillis() + jsonObject.getInteger("expires_in") * 1000 / 2);
expireTimes.put(appId, expireTime);
return token;
});
} finally {
lock.unlock();
}
return tokens.computeIfAbsent(appId, (k) -> {
String url = String.format(ACCESS_TOKEN_URL, appId, appSecret);
String response = HttpUtil.get(url);
JSONObject jsonObject = JSONObject.parseObject(response);
String token = jsonObject.getString("access_token");
Date expireTime = new Date(System.currentTimeMillis() + jsonObject.getInteger("expires_in") * 1000 / 2);
expireTimes.put(appId, expireTime);
return token;
});
}
public static void generateWXAQRCode(String appId, String appSecret, String envVersion, String path, String filePath) throws Exception {
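The interleaved old/new lines above are easier to follow as the resulting method: the expiry check and the token fetch now run under one ReentrantLock, so concurrent callers can no longer race to refresh the same token. Reconstructed new state, assembled from the hunk (the fields lock, tokens, expireTimes, and ACCESS_TOKEN_URL are declared earlier in the class):

    private static String getAccessToken(String appId, String appSecret) {
        lock.lock(); // serialize all token refreshes
        try {
            if (expireTimes.containsKey(appId)) {
                Date expireTime = expireTimes.get(appId);
                if (expireTime.getTime() < System.currentTimeMillis()) {
                    tokens.remove(appId); // expired: force a re-fetch below
                }
            } else {
                tokens.remove(appId); // no recorded expiry: treat as stale
            }
            return tokens.computeIfAbsent(appId, (k) -> {
                String url = String.format(ACCESS_TOKEN_URL, appId, appSecret);
                String response = HttpUtil.get(url);
                JSONObject jsonObject = JSONObject.parseObject(response);
                String token = jsonObject.getString("access_token");
                // Cache for half the reported lifetime to refresh well before expiry.
                Date expireTime = new Date(System.currentTimeMillis() + jsonObject.getInteger("expires_in") * 1000 / 2);
                expireTimes.put(appId, expireTime);
                return token;
            });
        } finally {
            lock.unlock();
        }
    }

One global lock also serializes refreshes for unrelated appIds; a per-appId lock would reduce contention if that ever becomes a problem.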

View File

@@ -8,45 +8,6 @@
</encoder>
</appender>
<!-- info 日志-->
<!-- RollingFileAppender:滚动记录文件,先将日志记录到指定文件,当符合某个条件时,将日志记录到其他文件 -->
<!-- 以下的大概意思是:1.先按日期存日志,日期变了,将前一天的日志文件名重命名为XXX%日期%索引,新的日志仍然是project_info.log -->
<!-- 2.如果日期没有发生变化,但是当前日志的文件大小超过10MB时,对当前日志进行分割 重命名-->
<appender name="info_log" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--日志文件路径和名称-->
<File>logs/project_info.log</File>
<!--是否追加到文件末尾,默认为true-->
<append>true</append>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>DENY</onMatch><!-- 如果命中ERROR就禁止这条日志 -->
<onMismatch>ACCEPT</onMismatch><!-- 如果没有命中就使用这条规则 -->
</filter>
<!--有两个与RollingFileAppender交互的重要子组件。 第一个RollingFileAppender子组件,即RollingPolicy:负责执行翻转所需的操作。
RollingFileAppender的第二个子组件,即TriggeringPolicy:将确定是否以及何时发生翻转。 因此,RollingPolicy负责什么和TriggeringPolicy负责什么时候.
作为任何用途,RollingFileAppender必须同时设置RollingPolicy和TriggeringPolicy,但是,如果其RollingPolicy也实现了TriggeringPolicy接口,则只需要显式指定前者。-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- 日志文件的名字会根据fileNamePattern的值,每隔一段时间改变一次 -->
<!-- 文件名:logs/project_info.2017-12-05.0.log -->
<!-- 注意:SizeAndTimeBasedRollingPolicy中 %i和%d令牌都是强制性的,必须存在,要不会报错 -->
<fileNamePattern>logs/project_info.%d.%i.log</fileNamePattern>
<!-- 每产生一个日志文件,该日志文件的保存期限为30天, ps:maxHistory的单位是根据fileNamePattern中的翻转策略自动推算出来的,例如上面选用了yyyy-MM-dd,则单位为天
如果上面选用了yyyy-MM,则单位为月,另外上面的单位默认为yyyy-MM-dd-->
<maxHistory>7</maxHistory>
<!-- 每个日志文件到10mb的时候开始切分,最多保留30天,但最大到20GB,哪怕没到30天也要删除多余的日志 -->
<totalSizeCap>1GB</totalSizeCap>
<!-- maxFileSize:这是活动文件的大小,默认值是10MB,测试时可改成5KB看效果 -->
<maxFileSize>10MB</maxFileSize>
</rollingPolicy>
<!--编码器-->
<encoder>
<!-- pattern节点,用来设置日志的输入格式 ps:日志文件中没有设置颜色,否则颜色部分会有ESC[0:39em等乱码-->
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level --- [%15.15(%thread)] %-40.40(%logger{40}) : %msg%n</pattern>
<!-- 记录日志的编码:此处设置字符集 - -->
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- error 日志-->
<!-- RollingFileAppender:滚动记录文件,先将日志记录到指定文件,当符合某个条件时,将日志记录到其他文件 -->
<!-- 以下的大概意思是:1.先按日期存日志,日期变了,将前一天的日志文件名重命名为XXX%日期%索引,新的日志仍然是project_error.log -->
@@ -85,39 +46,15 @@
</encoder>
</appender>
<appender name="sql_log" class="ch.qos.logback.core.rolling.RollingFileAppender">
<File>logs/project_sql.log</File>
<append>true</append>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>DEBUG</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>logs/project_sql.%d.%i.log</fileNamePattern>
<maxHistory>30</maxHistory>
<totalSizeCap>20GB</totalSizeCap>
<maxFileSize>10MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level --- [%15.15(%thread)] %-40.40(%logger{40}) : %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<root level="ERROR">
<appender-ref ref="error_log" />
<appender-ref ref="STDOUT" />
</root>
<root level="WARN">
<appender-ref ref="STDOUT" />
</root>
<!-- 指定项目中某个包,当有日志操作行为时的日志记录级别 -->
<!-- 级别依次为【从高到低】:FATAL > ERROR > WARN > INFO > DEBUG > TRACE -->
<logger name="com.ycwl.basic" level="INFO">
<appender-ref ref="info_log" />
<appender-ref ref="error_log" />
</logger>
<!-- 利用logback输入mybatis的sql日志,
注意:如果不加 additivity="false" 则此logger会将输出转发到自身以及祖先的logger中,就会出现日志文件中sql重复打印-->
<logger name="com.ycwl.basic.mapper" level="DEBUG" additivity="false">
<appender-ref ref="sql_log" />
</logger>
<root level="INFO">
<appender-ref ref="STDOUT" />
</root>
</configuration>

View File

@@ -21,83 +21,6 @@
</encoder>
</appender>
<!-- info 日志-->
<!-- RollingFileAppender:滚动记录文件,先将日志记录到指定文件,当符合某个条件时,将日志记录到其他文件 -->
<!-- 以下的大概意思是:1.先按日期存日志,日期变了,将前一天的日志文件名重命名为XXX%日期%索引,新的日志仍然是project_info.log -->
<!-- 2.如果日期没有发生变化,但是当前日志的文件大小超过10MB时,对当前日志进行分割 重命名-->
<appender name="info_log" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--日志文件路径和名称-->
<File>logs/project_info.log</File>
<!--是否追加到文件末尾,默认为true-->
<append>true</append>
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>DENY</onMatch><!-- 如果命中ERROR就禁止这条日志 -->
<onMismatch>ACCEPT</onMismatch><!-- 如果没有命中就使用这条规则 -->
</filter>
<!--有两个与RollingFileAppender交互的重要子组件。 第一个RollingFileAppender子组件,即RollingPolicy:负责执行翻转所需的操作。
RollingFileAppender的第二个子组件,即TriggeringPolicy:将确定是否以及何时发生翻转。 因此,RollingPolicy负责什么和TriggeringPolicy负责什么时候.
作为任何用途,RollingFileAppender必须同时设置RollingPolicy和TriggeringPolicy,但是,如果其RollingPolicy也实现了TriggeringPolicy接口,则只需要显式指定前者。-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- 日志文件的名字会根据fileNamePattern的值,每隔一段时间改变一次 -->
<!-- 文件名:logs/project_info.2017-12-05.0.log -->
<!-- 注意:SizeAndTimeBasedRollingPolicy中 %i和%d令牌都是强制性的,必须存在,要不会报错 -->
<fileNamePattern>logs/project_info.%d.%i.log</fileNamePattern>
<!-- 每产生一个日志文件,该日志文件的保存期限为30天, ps:maxHistory的单位是根据fileNamePattern中的翻转策略自动推算出来的,例如上面选用了yyyy-MM-dd,则单位为天
如果上面选用了yyyy-MM,则单位为月,另外上面的单位默认为yyyy-MM-dd-->
<maxHistory>30</maxHistory>
<!-- 每个日志文件到10mb的时候开始切分,最多保留30天,但最大到20GB,哪怕没到30天也要删除多余的日志 -->
<totalSizeCap>20GB</totalSizeCap>
<!-- maxFileSize:这是活动文件的大小,默认值是10MB,测试时可改成5KB看效果 -->
<maxFileSize>10MB</maxFileSize>
</rollingPolicy>
<!--编码器-->
<encoder>
<!-- pattern节点,用来设置日志的输入格式 ps:日志文件中没有设置颜色,否则颜色部分会有ESC[0:39em等乱码-->
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level --- [%15.15(%thread)] %-40.40(%logger{40}) : %msg%n</pattern>
<!-- 记录日志的编码:此处设置字符集 - -->
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- error 日志-->
<!-- RollingFileAppender:滚动记录文件,先将日志记录到指定文件,当符合某个条件时,将日志记录到其他文件 -->
<!-- 以下的大概意思是:1.先按日期存日志,日期变了,将前一天的日志文件名重命名为XXX%日期%索引,新的日志仍然是project_error.log -->
<!-- 2.如果日期没有发生变化,但是当前日志的文件大小超过10MB时,对当前日志进行分割 重命名-->
<appender name="error_log" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!--日志文件路径和名称-->
<File>logs/project_error.log</File>
<!--是否追加到文件末尾,默认为true-->
<append>true</append>
<!-- ThresholdFilter过滤低于指定阈值的事件。 对于等于或高于阈值的事件,ThresholdFilter将在调用其decision()方法时响应NEUTRAL。 但是,将拒绝级别低于阈值的事件 -->
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level><!-- 低于ERROR级别的日志(debug,info)将被拒绝,等于或者高于ERROR的级别将相应NEUTRAL -->
</filter>
<!--有两个与RollingFileAppender交互的重要子组件。 第一个RollingFileAppender子组件,即RollingPolicy:负责执行翻转所需的操作。
RollingFileAppender的第二个子组件,即TriggeringPolicy:将确定是否以及何时发生翻转。 因此,RollingPolicy负责什么和TriggeringPolicy负责什么时候.
作为任何用途,RollingFileAppender必须同时设置RollingPolicy和TriggeringPolicy,但是,如果其RollingPolicy也实现了TriggeringPolicy接口,则只需要显式指定前者。-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<!-- 活动文件的名字会根据fileNamePattern的值,每隔一段时间改变一次 -->
<!-- 文件名:logs/project_error.2017-12-05.0.log -->
<!-- 注意:SizeAndTimeBasedRollingPolicy中 %i和%d令牌都是强制性的,必须存在,要不会报错 -->
<fileNamePattern>logs/project_error.%d.%i.log</fileNamePattern>
<!-- 每产生一个日志文件,该日志文件的保存期限为30天, ps:maxHistory的单位是根据fileNamePattern中的翻转策略自动推算出来的,例如上面选用了yyyy-MM-dd,则单位为天
如果上面选用了yyyy-MM,则单位为月,另外上面的单位默认为yyyy-MM-dd-->
<maxHistory>30</maxHistory>
<!-- 每个日志文件到10mb的时候开始切分,最多保留30天,但最大到20GB,哪怕没到30天也要删除多余的日志 -->
<totalSizeCap>20GB</totalSizeCap>
<!-- maxFileSize:这是活动文件的大小,默认值是10MB,测试时可改成5KB看效果 -->
<maxFileSize>10MB</maxFileSize>
</rollingPolicy>
<!--编码器-->
<encoder>
<!-- pattern节点,用来设置日志的输入格式 ps:日志文件中没有设置颜色,否则颜色部分会有ESC[0:39em等乱码-->
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level --- [%15.15(%thread)] %-40.40(%logger{40}) : %msg%n</pattern>
<!-- 记录日志的编码:此处设置字符集 - -->
<charset>UTF-8</charset>
</encoder>
</appender>
<!--给定记录器的每个启用的日志记录请求都将转发到该记录器中的所有appender以及层次结构中较高的appender(不用在意level值)。
换句话说,appender是从记录器层次结构中附加地继承的。
例如,如果将控制台appender添加到根记录器,则所有启用的日志记录请求将至少在控制台上打印。
@@ -107,26 +30,14 @@
<root level="INFO">
<appender-ref ref="STDOUT" />
</root>
<!-- 指定项目中某个包,当有日志操作行为时的日志记录级别 -->
<!-- 级别依次为【从高到低】:FATAL > ERROR > WARN > INFO > DEBUG > TRACE -->
<logger name="com.ycwl.basic" level="INFO">
<appender-ref ref="info_log" />
<appender-ref ref="error_log" />
</logger>
<!-- 利用logback输入mybatis的sql日志,
注意:如果不加 additivity="false" 则此logger会将输出转发到自身以及祖先的logger中,就会出现日志文件中sql重复打印-->
<logger name="com.ycwl.basic.mapper" level="DEBUG" additivity="false">
<appender-ref ref="info_log" />
<appender-ref ref="error_log" />
<appender-ref ref="STDOUT" />
</logger>
<!-- additivity=false代表禁止默认累计的行为,即com.atomikos中的日志只会记录到日志文件中,不会输出层次级别更高的任何appender-->
<logger name="com.ycwl.basic" level="INFO" additivity="false">
<appender-ref ref="info_log" />
<appender-ref ref="error_log" />
<appender-ref ref="STDOUT" />
</logger>

View File

@@ -43,7 +43,7 @@
WHERE DATE_ADD(#{startTime}, INTERVAL (units.i + tens.i * 10 + hundreds.i * 100) DAY) &lt;= #{endTime}
) date_series
LEFT JOIN broker_record br ON DATE(br.create_time) = date_series.date AND br.broker_id = #{brokerId}
LEFT JOIN t_stats_record s ON s.action = 'CODE_SCAN' and s.identifier = br.id
LEFT JOIN t_stats_record s ON s.action = 'CODE_SCAN' and DATE(s.create_time) = date_series.date and s.identifier = #{brokerId}
GROUP BY date_series.date
ORDER BY date_series.date
</select>

View File

@@ -40,7 +40,8 @@
enable_pre_book = #{enablePreBook},
image_free = #{imageFree},
video_free = #{videoFree},
pair_device = #{pairDevice}
pair_device = #{pairDevice},
video_crop = #{videoCrop}
where id = #{id}
</update>
<update id="updateEntity">

View File

@@ -306,6 +306,14 @@
<if test="endCancelTime!= null ">
and cancel_at &lt;= #{endCancelTime}
</if>
<if test="type != null">
<if test="type &lt; 0">
and o.type = #{type}
</if>
<if test="type &gt;= 0">
and o.id in (select order_id from order_item where goods_type = #{type})
</if>
</if>
</where>
order by o.create_at desc
</select>