You've already forked FrameTour-BE
refactor(utils): 替换雪花ID生成工具实现
- 移除自定义的雪花ID生成逻辑
- 引入Hutool的Snowflake工具类
- 简化ID生成方法,提高代码可维护性
- 移除相关的测试类文件
- 删除不再使用的UniqueId和UniqueIdMetaData模型类
This commit is contained in:
@@ -1,92 +0,0 @@
|
||||
package com.ycwl.basic.utils;

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Test for SnowFlakeUtil to verify uniqueness under high concurrency.
|
||||
*/
|
||||
public class SnowFlakeUtilTest {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(SnowFlakeUtilTest.class);
|
||||
|
||||
@Test
|
||||
public void testIdUniqueness() throws InterruptedException {
|
||||
// Number of threads
|
||||
int threadCount = 1000;
|
||||
// Number of IDs per thread
|
||||
int idCountPerThread = 10000;
|
||||
// Total IDs expected
|
||||
int totalIdCount = threadCount * idCountPerThread;
|
||||
|
||||
ExecutorService executorService = Executors.newFixedThreadPool(threadCount);
|
||||
final Set<String> idSet = ConcurrentHashMap.newKeySet();
|
||||
final CountDownLatch latch = new CountDownLatch(threadCount);
|
||||
|
||||
// Use a set to capture any "suspicious" short IDs (potential raw timestamps)
|
||||
final Set<String> suspiciousIds = ConcurrentHashMap.newKeySet();
|
||||
|
||||
log.info("Starting concurrent snowflake ID generation test...");
|
||||
long start = System.currentTimeMillis();
|
||||
|
||||
for (int i = 0; i < threadCount; i++) {
|
||||
executorService.execute(() -> {
|
||||
try {
|
||||
for (int j = 0; j < idCountPerThread; j++) {
|
||||
String id = SnowFlakeUtil.getId();
|
||||
idSet.add(id);
|
||||
|
||||
// Check for the potential overflow bug where it returns raw timestamp
|
||||
// A valid snowflake ID (around year 2025) should be much larger than a timestamp
|
||||
// Current timestamp is approx 13 digits (1734...)
|
||||
// Snowflake ID should be approx 18-19 digits
|
||||
if (id.length() < 15) {
|
||||
suspiciousIds.add(id);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Error generating ID", e);
|
||||
} finally {
|
||||
latch.countDown();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
latch.await();
|
||||
long end = System.currentTimeMillis();
|
||||
executorService.shutdown();
|
||||
|
||||
log.info("Generated {} IDs in {} ms", totalIdCount, (end - start));
|
||||
|
||||
if (!suspiciousIds.isEmpty()) {
|
||||
log.warn("Found {} suspicious IDs (likely raw timestamps due to sequence overflow): {}", suspiciousIds.size(), suspiciousIds);
|
||||
// We might not fail the test for this if the user only asked for uniqueness,
|
||||
// but it's good to report.
|
||||
// However, if they are raw timestamps, they collide heavily if generated in the same ms.
|
||||
}
|
||||
|
||||
Assertions.assertEquals(totalIdCount, idSet.size(), "Duplicate IDs found!");
|
||||
log.info("Uniqueness test passed. Total unique IDs: {}", idSet.size());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPerformanceSingleThread() {
|
||||
long start = System.currentTimeMillis();
|
||||
int count = 100000;
|
||||
for (int i = 0; i < count; i++) {
|
||||
SnowFlakeUtil.getId();
|
||||
}
|
||||
long end = System.currentTimeMillis();
|
||||
log.info("Generated {} IDs in {} ms (Single Thread)", count, (end - start));
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user