feat(knowledge-graph): set up knowledge graph infrastructure

Features:
- Neo4j Docker Compose configuration (Community Edition, ports 7474/7687, data persistence)
- New Makefile targets for Neo4j (neo4j-up/down/logs/shell)
- knowledge-graph-service Spring Boot service (full DDD layered architecture)
- kg_extraction Python module (built on LangChain's LLMGraphTransformer)

Technical implementation:
- Neo4j configuration: password supplied via environment variable, unified default datamate123
- Java service:
  - Domain: GraphEntity and GraphRelation entity models
  - Repository: Spring Data Neo4j with graphId-scoped queries
  - Service: business logic, double graphId validation, query rate limiting
  - Controller: REST API with UUID format validation
  - Exception: implements the ErrorCode interface, unified exception hierarchy
- Python module:
  - KnowledgeGraphExtractor class
  - Async, sync, and batch extraction (usage sketch after this list)
  - Schema-guided extraction mode
  - Compatible with OpenAI and self-hosted models
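
A minimal usage sketch of the Python module (illustrative only; the model name, text, and graph_id below are placeholders, not values fixed by this commit):

    from app.module.kg_extraction import ExtractionRequest, KnowledgeGraphExtractor

    extractor = KnowledgeGraphExtractor(model_name="gpt-4o-mini")  # any OpenAI-compatible model
    result = extractor.extract_sync(
        ExtractionRequest(text="Ada Lovelace worked with Charles Babbage.", graph_id="<graph uuid>")
    )
    for triple in result.triples:
        print(triple.subject.name, triple.predicate, triple.object.name)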

Key design points:
- graphId permission boundary: all entity operations are scoped to the correct graphId
- Query rate limiting: depth and limit parameters are bounded by configuration
- Exception handling: BusinessException + ErrorCode used consistently
- Credential management: environment variables instead of hard-coded secrets (Python-side sketch after this list)
- Defense in depth: format validation in the Controller plus business validation in the Service
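
On the Python side, credentials can be pulled from the environment rather than hard-coded; a sketch (the variable names KG_LLM_MODEL, OPENAI_BASE_URL, and OPENAI_API_KEY are assumptions, not names defined by this commit):

    import os

    from app.module.kg_extraction import KnowledgeGraphExtractor

    extractor = KnowledgeGraphExtractor(
        model_name=os.environ.get("KG_LLM_MODEL", "gpt-4o-mini"),
        base_url=os.environ.get("OPENAI_BASE_URL"),      # None falls back to api.openai.com
        api_key=os.environ.get("OPENAI_API_KEY", "EMPTY"),
    )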

Code review:
- 3 rounds of Codex review and 2 rounds of Claude fixes
- All P0 and P1 issues resolved
- Compiles cleanly with no blocking issues

File changes:
- Added: Neo4j configuration, knowledge-graph-service (11 Java files), kg_extraction (3 Python files)
- Modified: Makefile, pom.xml, application.yml, pyproject.toml
2026-02-17 20:42:55 +08:00
parent 8f21798d57
commit 5a553ddde3
22 changed files with 1007 additions and 0 deletions

app/module/kg_extraction/__init__.py

@@ -0,0 +1,17 @@
from app.module.kg_extraction.extractor import KnowledgeGraphExtractor
from app.module.kg_extraction.models import (
    ExtractionRequest,
    ExtractionResult,
    Triple,
    GraphNode,
    GraphEdge,
)

__all__ = [
    "KnowledgeGraphExtractor",
    "ExtractionRequest",
    "ExtractionResult",
    "Triple",
    "GraphNode",
    "GraphEdge",
]

app/module/kg_extraction/extractor.py

@@ -0,0 +1,183 @@
"""基于 LLM 的知识图谱三元组抽取器。
利用 LangChain 的 LLMGraphTransformer 从非结构化文本中抽取实体和关系,
支持 schema-guided 抽取以提升准确率。
"""
from __future__ import annotations
import logging
from typing import Sequence
from langchain_core.documents import Document
from langchain_openai import ChatOpenAI
from langchain_experimental.graph_transformers import LLMGraphTransformer
from app.module.kg_extraction.models import (
ExtractionRequest,
ExtractionResult,
ExtractionSchema,
GraphEdge,
GraphNode,
Triple,
)
logger = logging.getLogger(__name__)
class KnowledgeGraphExtractor:
"""基于 LLMGraphTransformer 的三元组抽取器。
Parameters
----------
model_name : str
OpenAI 兼容模型名称。
base_url : str | None
自定义 API base URL(用于对接 vLLM/Ollama 等本地模型服务)。
api_key : str
API 密钥。
temperature : float
生成温度,抽取任务建议使用较低值。
"""
def __init__(
self,
model_name: str = "gpt-4o-mini",
base_url: str | None = None,
api_key: str = "EMPTY",
temperature: float = 0.0,
) -> None:
self._llm = ChatOpenAI(
model=model_name,
base_url=base_url,
api_key=api_key,
temperature=temperature,
)
def _build_transformer(
self,
schema: ExtractionSchema | None = None,
) -> LLMGraphTransformer:
"""根据可选的 schema 约束构造 LLMGraphTransformer。"""
kwargs: dict = {"llm": self._llm}
if schema:
if schema.entity_types:
kwargs["allowed_nodes"] = [et.name for et in schema.entity_types]
if schema.relation_types:
kwargs["allowed_relationships"] = [rt.name for rt in schema.relation_types]
return LLMGraphTransformer(**kwargs)
async def extract(self, request: ExtractionRequest) -> ExtractionResult:
"""从文本中抽取三元组。
Parameters
----------
request : ExtractionRequest
包含文本、schema 约束等信息的抽取请求。
Returns
-------
ExtractionResult
抽取得到的节点、边和三元组。
"""
transformer = self._build_transformer(request.schema)
documents = [Document(page_content=request.text)]
try:
graph_documents = await transformer.aconvert_to_graph_documents(documents)
except Exception:
logger.exception("LLM graph extraction failed for source_id=%s", request.source_id)
return ExtractionResult(raw_text=request.text, source_id=request.source_id)
return self._convert_result(graph_documents, request)
def extract_sync(self, request: ExtractionRequest) -> ExtractionResult:
"""同步版本的三元组抽取。"""
transformer = self._build_transformer(request.schema)
documents = [Document(page_content=request.text)]
try:
graph_documents = transformer.convert_to_graph_documents(documents)
except Exception:
logger.exception("LLM graph extraction failed for source_id=%s", request.source_id)
return ExtractionResult(raw_text=request.text, source_id=request.source_id)
return self._convert_result(graph_documents, request)
async def extract_batch(
self,
requests: Sequence[ExtractionRequest],
) -> list[ExtractionResult]:
"""批量抽取。
对多段文本逐一抽取并汇总结果。
如需更高吞吐,可自行用 asyncio.gather 并发调用 extract。
"""
results: list[ExtractionResult] = []
for req in requests:
result = await self.extract(req)
results.append(result)
return results
@staticmethod
def _convert_result(
graph_documents: list,
request: ExtractionRequest,
) -> ExtractionResult:
"""将 LangChain GraphDocument 转换为内部数据模型。"""
nodes: list[GraphNode] = []
edges: list[GraphEdge] = []
triples: list[Triple] = []
seen_nodes: set[str] = set()
for doc in graph_documents:
# 收集节点
for node in doc.nodes:
node_key = f"{node.id}:{node.type}"
if node_key not in seen_nodes:
seen_nodes.add(node_key)
nodes.append(
GraphNode(
name=node.id,
type=node.type,
properties=node.properties if hasattr(node, "properties") else {},
)
)
# 收集关系
for rel in doc.relationships:
source_node = GraphNode(
name=rel.source.id,
type=rel.source.type,
)
target_node = GraphNode(
name=rel.target.id,
type=rel.target.type,
)
edges.append(
GraphEdge(
source=rel.source.id,
target=rel.target.id,
relation_type=rel.type,
properties=rel.properties if hasattr(rel, "properties") else {},
)
)
triples.append(
Triple(
subject=source_node,
predicate=rel.type,
object=target_node,
)
)
return ExtractionResult(
nodes=nodes,
edges=edges,
triples=triples,
raw_text=request.text,
source_id=request.source_id,
)
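
The extract_batch docstring suggests asyncio.gather for higher throughput; a sketch of that concurrent variant (the helper name is ours, not part of this commit, and it assumes the backend's rate limits allow the fan-out):

    import asyncio

    from app.module.kg_extraction import ExtractionRequest, ExtractionResult, KnowledgeGraphExtractor

    async def extract_concurrently(
        extractor: KnowledgeGraphExtractor,
        requests: list[ExtractionRequest],
    ) -> list[ExtractionResult]:
        # One extract() task per request; gather returns results in input order.
        return await asyncio.gather(*(extractor.extract(req) for req in requests))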

app/module/kg_extraction/models.py

@@ -0,0 +1,75 @@
"""知识图谱三元组抽取数据模型。"""
from __future__ import annotations
from pydantic import BaseModel, Field
class GraphNode(BaseModel):
"""图谱节点(实体)。"""
name: str = Field(..., description="实体名称")
type: str = Field(..., description="实体类型, 如 Person, Organization, Location")
properties: dict[str, object] = Field(default_factory=dict, description="扩展属性")
class GraphEdge(BaseModel):
"""图谱边(关系)。"""
source: str = Field(..., description="源实体名称")
target: str = Field(..., description="目标实体名称")
relation_type: str = Field(..., description="关系类型, 如 works_at, located_in")
properties: dict[str, object] = Field(default_factory=dict, description="关系属性")
class Triple(BaseModel):
"""知识三元组: (主体, 关系, 客体)。"""
subject: GraphNode
predicate: str = Field(..., description="关系类型")
object: GraphNode
class EntityTypeConstraint(BaseModel):
"""实体类型约束,用于 Schema-guided 抽取。"""
name: str = Field(..., description="类型名称")
description: str = Field(default="", description="类型说明")
class RelationTypeConstraint(BaseModel):
"""关系类型约束。"""
name: str = Field(..., description="关系类型名称")
source_types: list[str] = Field(default_factory=list, description="允许的源实体类型")
target_types: list[str] = Field(default_factory=list, description="允许的目标实体类型")
description: str = Field(default="", description="关系说明")
class ExtractionSchema(BaseModel):
"""抽取 schema 约束,约束 LLM 输出的实体和关系类型范围。"""
entity_types: list[EntityTypeConstraint] = Field(default_factory=list)
relation_types: list[RelationTypeConstraint] = Field(default_factory=list)
class ExtractionRequest(BaseModel):
"""三元组抽取请求。"""
text: str = Field(..., description="待抽取的文本")
graph_id: str = Field(..., description="目标图谱 ID")
schema: ExtractionSchema | None = Field(
default=None, description="可选的 schema 约束, 提供后做 schema-guided 抽取"
)
source_id: str | None = Field(default=None, description="来源 ID(数据集/知识库条目)")
source_type: str = Field(default="KNOWLEDGE_BASE", description="来源类型")
class ExtractionResult(BaseModel):
"""三元组抽取结果。"""
nodes: list[GraphNode] = Field(default_factory=list)
edges: list[GraphEdge] = Field(default_factory=list)
triples: list[Triple] = Field(default_factory=list)
raw_text: str = Field(default="", description="原始文本")
source_id: str | None = None
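
A sketch of how the constraint models combine into a schema-guided request (the entity and relation type names below are illustrative, not defined by this commit):

    from app.module.kg_extraction.models import (
        EntityTypeConstraint,
        ExtractionRequest,
        ExtractionSchema,
        RelationTypeConstraint,
    )

    schema = ExtractionSchema(
        entity_types=[
            EntityTypeConstraint(name="Person"),
            EntityTypeConstraint(name="Organization"),
        ],
        relation_types=[
            RelationTypeConstraint(name="works_at", source_types=["Person"], target_types=["Organization"]),
        ],
    )
    request = ExtractionRequest(
        text="Alice works at Acme Corp.",
        graph_id="<graph uuid>",
        schema=schema,
    )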