feature: add data-evaluation

* feature: add evaluation task management function
* feature: add evaluation task detail page
* fix: delete duplicate definition for table t_model_config
* refactor: rename package synthesis to ratio
* refactor: add eval file table and refactor related code
* fix: calling large models in parallel during evaluation (see the sketch below)
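
The last bullet refers to issuing model calls concurrently during evaluation. The evaluation code itself is not in this hunk; below is a minimal sketch of the concurrent-call pattern it alludes to, assuming asyncio — the function names and the stubbed client call are hypothetical, not taken from the commit.

import asyncio
from typing import List

async def call_model(prompt: str) -> str:
    # Stand-in for a real LLM request; the project's actual client is not shown here.
    await asyncio.sleep(0.1)  # simulates request latency
    return f"score for: {prompt}"

async def evaluate_samples(prompts: List[str]) -> List[str]:
    # Issue all model calls concurrently instead of awaiting them one by one.
    return await asyncio.gather(*(call_model(p) for p in prompts))

if __name__ == "__main__":
    print(asyncio.run(evaluate_samples(["sample 1", "sample 2"])))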
@@ -0,0 +1,45 @@
"""
Service for managing evaluation prompt templates.
"""
from typing import List

from app.module.evaluation.schema.prompt import EVALUATION_PROMPT_TEMPLATE
from app.module.evaluation.schema.prompt_template import (
    PromptTemplateItem,
    PromptTemplateDimension,
    PromptTemplateResponse
)


class PromptTemplateService:
    """Service for managing evaluation prompt templates"""

    @staticmethod
    def get_prompt_templates() -> PromptTemplateResponse:
        """
        Get all available prompt templates.

        Returns:
            PromptTemplateResponse containing all prompt templates
        """
        templates: List[PromptTemplateItem] = []

        for template in EVALUATION_PROMPT_TEMPLATE:
            # Convert raw dimension dicts to the proper schema
            dimensions = [
                PromptTemplateDimension(
                    dimension=dim.get("dimension", ""),
                    description=dim.get("description", "")
                )
                for dim in template.get("defaultDimensions", [])
            ]

            # Create the template item for this evaluation type
            template_item = PromptTemplateItem(
                evalType=template.get("evalType", ""),
                defaultDimensions=dimensions,
                prompt=template.get("prompt", "")
            )
            templates.append(template_item)

        return PromptTemplateResponse(templates=templates)
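
The three schema classes imported above are added elsewhere in this commit and do not appear in this hunk. Judging only from the keyword arguments the service passes to them, they might look roughly like the following Pydantic models — the base class and exact field types are assumptions, not part of this diff.

from typing import List
from pydantic import BaseModel

class PromptTemplateDimension(BaseModel):
    dimension: str
    description: str = ""

class PromptTemplateItem(BaseModel):
    evalType: str
    defaultDimensions: List[PromptTemplateDimension]
    prompt: str

class PromptTemplateResponse(BaseModel):
    templates: List[PromptTemplateItem]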
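
A service like this is typically exposed through a read-only endpoint. As a usage illustration only — the framework, router prefix, route path, and the service's module path are assumptions, not shown in this commit:

from fastapi import APIRouter

from app.module.evaluation.schema.prompt_template import PromptTemplateResponse
from app.module.evaluation.service.prompt_template import PromptTemplateService  # hypothetical module path

router = APIRouter(prefix="/evaluation")  # hypothetical prefix

@router.get("/prompt-templates", response_model=PromptTemplateResponse)
def list_prompt_templates() -> PromptTemplateResponse:
    # Delegates to the static service method added in this commit.
    return PromptTemplateService.get_prompt_templates()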