-
Notifications
You must be signed in to change notification settings - Fork 0
/
answer_grader.py
34 lines (26 loc) · 1.18 KB
/
answer_grader.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import RunnableSequence
from langchain_core.language_models.chat_models import BaseChatModel
class GradeAnswer(BaseModel):
    # Structured-output schema the grader LLM must fill in: a single
    # boolean verdict on whether the generation answers the question.
    # NOTE(review): the Field description speaks of 'yes'/'no' while the
    # type is bool — pydantic v1 coerces the strings "yes"/"no" to
    # True/False, so this works, but the wording is presumably surfaced
    # to the LLM via the structured-output schema; confirm before editing.
    binary_score: bool = Field(
        description="Answer addresses the question, 'yes' or 'no'"
    )
class GradeAnswerChain:
    """Factory for a runnable chain that grades an LLM generation.

    The chain takes a dict with keys ``question`` (the user input) and
    ``generation`` (the LLM output) and returns a :class:`GradeAnswer`
    whose ``binary_score`` says whether the generation is a good answer.
    """

    def __init__(self, model: BaseChatModel) -> None:
        self.model = model
        # Force the model's reply to be parsed into a GradeAnswer object
        # instead of free text.
        structured_llm_grader = self.model.with_structured_output(GradeAnswer)
        # Bug fix: original prompt read "Yes' means" (missing opening
        # quote), garbling the instruction to the grader model.
        system = """You are an evaluator who evaluates whether a generated response is well written and can be a good response to user input \n
        Give a binary score 'yes' or 'no'. 'Yes' means that the answer is good."""
        answer_prompt = ChatPromptTemplate.from_messages(
            [
                ("system", system),
                (
                    "human",
                    "User input: \n\n {question} \n\n LLM generation: {generation}",
                ),
            ]
        )
        # prompt -> structured grader model, composed with LCEL's pipe operator.
        self.answer_grader: RunnableSequence = answer_prompt | structured_llm_grader

    def get_chain(self) -> RunnableSequence:
        """Return the composed prompt-and-grader runnable."""
        return self.answer_grader