Files
DataMate/runtime/datamate-python/app/module/evaluation/schema/prompt_template.py
hefanli 1d19cd3a62 feature: add data-evaluation
* feature: add evaluation task management function

* feature: add evaluation task detail page

* fix: delete duplicate definition for table t_model_config

* refactor: rename package synthesis to ratio

* refactor: add eval file table and refactor related code

* fix: calling large models in parallel during evaluation
2025-12-04 09:23:54 +08:00

30 lines
963 B
Python

"""
Schema for evaluation prompt templates.
"""
from typing import List, Dict, Any
from pydantic import BaseModel, Field
class PromptTemplateDimension(BaseModel):
    """One named dimension that an evaluation prompt scores against."""

    dimension: str = Field(
        ...,
        description="Dimension name",
    )
    description: str = Field(
        ...,
        description="Description of the dimension",
    )
class PromptTemplateItem(BaseModel):
    """A prompt template together with its evaluation type and default dimensions."""

    # NOTE: field names are camelCase to match the JSON contract with callers.
    evalType: str = Field(..., description="Evaluation type")
    defaultDimensions: List[PromptTemplateDimension] = Field(
        default_factory=list,
        description="List of default dimensions for this evaluation type",
    )
    prompt: str = Field(..., description="The prompt template string")
class PromptTemplateResponse(BaseModel):
    """Response payload carrying every available prompt template."""

    templates: List[PromptTemplateItem] = Field(
        ...,
        description="List of available prompt templates",
    )