feature: add data-evaluation

* feature: add evaluation task management functionality

* feature: add evaluation task detail page

* fix: delete duplicate definition for table t_model_config

* refactor: rename package synthesis to ratio

* refactor: add eval file table and refactor related code

* fix: call large models in parallel during evaluation
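
The parallel-call fix is implemented elsewhere in this diff, so the actual change is not shown here. As a rough illustration of the pattern, below is a minimal sketch assuming an asyncio-based flow; call_model, its signature, and the concurrency limit are all hypothetical, not taken from the commit:

import asyncio
from typing import List

async def call_model(prompt: str) -> str:
    # Hypothetical stand-in for a single large-model call.
    await asyncio.sleep(0.1)  # placeholder for request latency
    return f"result for: {prompt}"

async def evaluate_in_parallel(prompts: List[str], max_concurrency: int = 8) -> List[str]:
    # Bound the fan-out so the model endpoint is not overwhelmed.
    semaphore = asyncio.Semaphore(max_concurrency)

    async def bounded_call(prompt: str) -> str:
        async with semaphore:
            return await call_model(prompt)

    # gather preserves input order, so results align with the prompts.
    return await asyncio.gather(*(bounded_call(p) for p in prompts))

# Usage: asyncio.run(evaluate_in_parallel(["q1", "q2", "q3"]))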
Author: hefanli
Date: 2025-12-04 09:23:54 +08:00
Committed by: GitHub
Parent: 265e284fb8
Commit: 1d19cd3a62
52 changed files with 2882 additions and 1244 deletions


@@ -0,0 +1,45 @@
"""
Service for managing evaluation prompt templates.
"""
from typing import List, Dict, Any
from app.module.evaluation.schema.prompt import EVALUATION_PROMPT_TEMPLATE
from app.module.evaluation.schema.prompt_template import (
PromptTemplateItem,
PromptTemplateDimension,
PromptTemplateResponse
)
class PromptTemplateService:
"""Service for managing evaluation prompt templates"""
@staticmethod
def get_prompt_templates() -> PromptTemplateResponse:
"""
Get all available prompt templates
Returns:
PromptTemplateResponse containing all prompt templates
"""
templates = []
for template in EVALUATION_PROMPT_TEMPLATE:
# Convert dimensions to the proper schema
dimensions = [
PromptTemplateDimension(
dimension=dim.get("dimension"),
description=dim.get("description", "")
)
for dim in template.get("defaultDimensions", [])
]
# Create template item
template_item = PromptTemplateItem(
evalType=template.get("evalType", ""),
defaultDimensions=dimensions,
prompt=template.get("prompt", "")
)
templates.append(template_item)
return PromptTemplateResponse(templates=templates)
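
The schema classes imported above live in app.module.evaluation.schema.prompt_template, which is not part of this hunk. A minimal sketch of what they plausibly look like, assuming Pydantic models whose fields mirror the keyword arguments used by the service (a reconstruction, not the committed code):

from typing import List, Optional
from pydantic import BaseModel

class PromptTemplateDimension(BaseModel):
    # dim.get("dimension") may return None, hence Optional
    dimension: Optional[str] = None
    description: str = ""

class PromptTemplateItem(BaseModel):
    evalType: str
    defaultDimensions: List[PromptTemplateDimension]
    prompt: str

class PromptTemplateResponse(BaseModel):
    templates: List[PromptTemplateItem]

# Usage:
# response = PromptTemplateService.get_prompt_templates()
# print(response.templates[0].evalType)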