Reflection/self-reflection is a bit underrated. If your application relies on prompting, I strongly recommend exploring this concept. It is not hard to implement, and reflective techniques can help you iteratively refine LLM responses.
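The example below uses Mirascope with OpenAI and Pydantic response models. A StoryWriter agent wires up two calls: a generator that writes a short moral story from a list of keywords, and a reviewer that returns structured feedback (a list of issues plus an is_good flag). The run loop alternates between the two, appending the reviewer's issues to the generator's history, until the review passes or the step limit is reached.
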
from mirascope.core import BaseMessageParam, ResponseModelConfigDict, openai
from pydantic import BaseModel
import os

os.environ["OPENAI_API_KEY"] = ""  # set your OpenAI API key here


class Review(BaseModel):
    issues: list[str]
    is_good: bool

    model_config = ResponseModelConfigDict(strict=True)


class Story(BaseModel):
    story: str

    model_config = ResponseModelConfigDict(strict=True)


class StoryWriter(BaseModel):
    keywords: list[str]
    generator_history: list[openai.OpenAIMessageParam] = []

    @openai.call(
        "gpt-4o-mini",
        response_model=Story,
        json_mode=True,
        call_params={"temperature": 0.8},
    )
    def generator(self, query: str) -> list[openai.OpenAIMessageParam]:
        # The generator relies on the accumulated history, which already
        # contains the latest user query (appended in `run`).
        return [
            BaseMessageParam(
                role="system",
                content="You are an expert in writing short moral stories for kids below the age of 10.",
            ),
            *self.generator_history,
        ]

    @openai.call(
        "gpt-4o-mini",
        response_model=Review,
        json_mode=True,
        call_params={"temperature": 0.1},
    )
    def reviewer(self, story: str) -> list[openai.OpenAIMessageParam]:
        return [
            BaseMessageParam(
                role="system",
                content=(
                    "You are an expert in reviewing short moral stories for kids below the age of 10, "
                    "checking whether all the keywords were used effectively and identifying issues "
                    "related to relevance and ease of understanding."
                ),
            ),
            BaseMessageParam(
                role="user",
                content=(
                    "Review the given moral story for kids. Check if the story uses all the given keywords. "
                    "Also check if the story is reasonably realistic, engaging and uses basic vocabulary "
                    "that is easy to understand for kids below the age of 10. Return the issues. Finally, "
                    "return True if the moral story is good enough for kids and contains all the keywords.\n"
                    f"story: {story}\nkeywords: {self.keywords}"
                ),
            ),
        ]

    def run(self, steps: int = 3) -> str:
        query = f"Generate a moral story for kids, using all the given keywords. Return only the story. {self.keywords}"
        self.generator_history += [
            BaseMessageParam(role="user", content=query),
        ]
        story = ""
        for _ in range(steps):
            generator_response = self.generator(query)
            story = generator_response.story
            reviewer_response = self.reviewer(story)
            if reviewer_response.is_good:
                break
            # Reflection step: feed the reviewer's issues back to the generator and retry.
            query = "Use the given feedback to improve the story. Return only the story."
            self.generator_history += [
                BaseMessageParam(role="assistant", content=generator_response.story),
                BaseMessageParam(
                    role="user",
                    content=" ".join(reviewer_response.issues) + " " + query,
                ),
            ]
        print(self.generator_history)
        return story


story = StoryWriter(
    keywords=[
        "elephant",
        "boy",
        "strong",
        "funny",
        "good",
        "ride",
        "Nikolas",
        "road",
        "cap",
        "car",
    ]
).run()

print("==================")
print("result", story)
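The same generate-review-revise loop does not require Mirascope. Below is a minimal sketch using only the openai Python SDK; the chat helper, write_with_reflection function, the prompts, and the plain-text "OK" convention for a passing review are simplifications I am assuming for illustration, not part of the code above.

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment


def chat(messages: list[dict], temperature: float) -> str:
    # Single chat completion call; returns the assistant's text.
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        temperature=temperature,
    )
    return response.choices[0].message.content


def write_with_reflection(keywords: list[str], steps: int = 3) -> str:
    history = [
        {"role": "system", "content": "You write short moral stories for kids below the age of 10."},
        {"role": "user", "content": f"Write a moral story using all of these keywords: {keywords}. Return only the story."},
    ]
    story = ""
    for _ in range(steps):
        story = chat(history, temperature=0.8)
        history.append({"role": "assistant", "content": story})
        # Reviewer pass: list issues, or reply "OK" if the story passes.
        critique = chat(
            [
                {"role": "system", "content": "You review short moral stories for kids below the age of 10."},
                {
                    "role": "user",
                    "content": (
                        "List any issues with the story below (missing keywords, hard vocabulary, "
                        "poor engagement). If there are none, reply with exactly OK.\n"
                        f"story: {story}\nkeywords: {keywords}"
                    ),
                },
            ],
            temperature=0.1,
        )
        if critique.strip() == "OK":
            break
        # Reflection step: feed the critique back and ask for a revision.
        history.append({"role": "user", "content": critique + " Use this feedback to improve the story. Return only the story."})
    return story

The trade-off versus the Mirascope version is that the reviewer's verdict comes back as free text instead of a validated Review object, so the stopping condition is more brittle.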