* feature: add cot data evaluation function
* fix: added verification to evaluation results
* fix: fix the prompt for evaluating
* fix: fix the failure when reading an empty evaluation result
30 lines
952 B
Python
"""
|
|
Schema for evaluation prompt templates.
|
|
"""
|
|
from typing import List
|
|
from pydantic import BaseModel, Field
|
|
|
|
|
|
class PromptTemplateDimension(BaseModel):
|
|
"""A single dimension in the prompt template"""
|
|
dimension: str = Field(..., description="Dimension name")
|
|
description: str = Field(..., description="Description of the dimension")
|
|
|
|
|
|
class PromptTemplateItem(BaseModel):
|
|
"""A single prompt template item"""
|
|
evalType: str = Field(..., description="Evaluation type")
|
|
defaultDimensions: List[PromptTemplateDimension] = Field(
|
|
default_factory=list,
|
|
description="List of default dimensions for this evaluation type"
|
|
)
|
|
prompt: str = Field(..., description="The prompt template string")
|
|
|
|
|
|
class PromptTemplateResponse(BaseModel):
|
|
"""Response model for getting prompt templates"""
|
|
templates: List[PromptTemplateItem] = Field(
|
|
...,
|
|
description="List of available prompt templates"
|
|
)
|
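
Below is a minimal usage sketch that could sit at the bottom of this module; it is not part of the original file. It shows how the three models nest into a response payload. It assumes pydantic v2 (model_dump_json); on pydantic v1 the equivalent call is .json(). The evaluation type, dimension, and prompt values are purely illustrative, not values defined by the schema.

if __name__ == "__main__":
    # Hypothetical example data: field values here are illustrative only.
    template = PromptTemplateItem(
        evalType="cot",
        defaultDimensions=[
            PromptTemplateDimension(
                dimension="correctness",
                description="Whether the reasoning steps support the final answer",
            ),
        ],
        prompt="Evaluate the following chain-of-thought answer: {answer}",
    )
    response = PromptTemplateResponse(templates=[template])
    # Serialize to JSON (pydantic v2); on pydantic v1 use response.json(indent=2).
    print(response.model_dump_json(indent=2))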