235 lines
10 KiB
Python
235 lines
10 KiB
Python
import logging
|
|
from http.client import responses
|
|
from pprint import pprint
|
|
from tqdm.asyncio import tqdm
|
|
from typing import List, Self
|
|
from living_agents import MemoryStream, LLMAgent, Character, PromptManager, Memory
|
|
from living_agents.datatypes import CharacterTemplate
|
|
from llm_connector import LLMMessage
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class CharacterAgent:
|
|
"""Enhanced agent with Stanford's memory architecture"""
|
|
|
|
def __init__(self, character: Character, llm: LLMAgent):
|
|
self.character = character
|
|
self.llm = llm
|
|
self.memory_stream = MemoryStream(llm)
|
|
self.current_plan: List[str] = []
|
|
|
|
async def initialize_memories(self):
|
|
"""Initialize agent with background memories"""
|
|
background_facts = [
|
|
f"My name is {self.character.name} and I am {self.character.age} years old",
|
|
f"My personality: {self.character.personality}",
|
|
f"My occupation: {self.character.occupation}",
|
|
f"I live in {self.character.location}"
|
|
]
|
|
|
|
for fact in background_facts:
|
|
await self.memory_stream.add_observation(self.character, fact)
|
|
|
|
for person, relationship in self.character.relationships.items():
|
|
await self.memory_stream.add_observation(self.character, f"My relationship with {person}: {relationship}")
|
|
|
|
async def perceive(self, observation: str, skip_scoring=False) -> None:
|
|
"""Add new observation to memory stream"""
|
|
if skip_scoring:
|
|
await self.memory_stream.add_observation(observation)
|
|
else:
|
|
await self.memory_stream.add_observation(observation, self._score_memory_importance)
|
|
|
|
async def react_to_situation(self, situation: str) -> str:
|
|
"""Generate reaction based on memory and character"""
|
|
# Retrieve relevant memories
|
|
relevant_memories = await self.memory_stream.retrieve_related_memories(situation, k=8)
|
|
memory_context = "\n".join([f"- {m.description}" for m in relevant_memories])
|
|
|
|
context = {
|
|
'character': self._get_character_prompt(),
|
|
'character_name': self.character.name,
|
|
'memory_context': memory_context,
|
|
'situation': situation}
|
|
prompt = PromptManager.get_prompt('react_to_situation', context)
|
|
|
|
response = await self.llm.chat([{"role": "user", "content": prompt}])
|
|
|
|
# create new memories from interaction
|
|
interaction_context = {
|
|
'situation': f'I reacted to: \n{situation}',
|
|
'response': f'My response was: \n{response}',
|
|
}
|
|
prompt, schema = PromptManager.get_prompt_with_schema('extract_interaction_memories', interaction_context)
|
|
memories_response = await self.llm.client.get_structured_response([{"role": "user", "content": prompt}], schema)
|
|
for new_memory in memories_response['memories']:
|
|
await self.perceive(new_memory)
|
|
|
|
return response
|
|
|
|
async def plan_day(self) -> List[str]:
|
|
"""Generate high-level daily plan"""
|
|
# Retrieve relevant memories about goals, habits, schedule
|
|
relevant_memories = await self.memory_stream.retrieve_related_memories(
|
|
f"{self.character.name} daily routine goals schedule", k=5
|
|
)
|
|
|
|
memory_context = "\n".join([m.description for m in relevant_memories])
|
|
|
|
plan_prompt = f"""You are {self.character.name}.
|
|
Background: {self.character.personality}
|
|
Occupation: {self.character.occupation}
|
|
|
|
Relevant memories:
|
|
{memory_context}
|
|
|
|
Plan your day in broad strokes (5-8 activities with times):
|
|
1)"""
|
|
|
|
try:
|
|
response = await self.llm.chat([{"role": "user", "content": plan_prompt}], max_tokens=300)
|
|
plan_steps = [f"1){response}"] if response else ["1) Go about my daily routine"]
|
|
|
|
# Add plan to memory
|
|
plan_description = f"Daily plan: {'; '.join(plan_steps)}"
|
|
await self.memory_stream.add_observation(self.character, plan_description)
|
|
|
|
return plan_steps
|
|
except:
|
|
return ["1) Go about my daily routine"]
|
|
|
|
async def get_summary(self) -> str:
|
|
"""Generate current summary based on memories and reflections"""
|
|
reflections = [m for m in self.memory_stream.memories if m.memory_type == "reflection"]
|
|
recent_observations = self.memory_stream.memories[-10:]
|
|
|
|
summary_memories = reflections[-3:] + recent_observations[-5:]
|
|
memory_context = "\n".join([m.description for m in summary_memories])
|
|
|
|
summary_prompt = f"""Based on the following memories and reflections, provide a brief summary of who {self.character.name} is and what they care about:
|
|
|
|
{memory_context}
|
|
|
|
Summary:"""
|
|
|
|
try:
|
|
return await self.llm.chat([{"role": "user", "content": summary_prompt}], max_tokens=150)
|
|
except:
|
|
return f"{self.character.name} is a {self.character.age}-year-old {self.character.occupation}."
|
|
|
|
async def _get_related_memories_for_scoring(self, memory_text: str, exclude_self=None, k=5) -> List:
|
|
"""Get memories related to the one being scored"""
|
|
# Get embedding for the memory being scored
|
|
memory_embedding = await self.llm.get_embedding(memory_text)
|
|
|
|
# Calculate similarity to other memories
|
|
similarities = []
|
|
for mem in self.memory_stream.memories:
|
|
if mem == exclude_self:
|
|
continue
|
|
|
|
if mem.embedding:
|
|
from sklearn.metrics.pairwise import cosine_similarity
|
|
similarity = cosine_similarity([memory_embedding], [mem.embedding])[0][0]
|
|
similarities.append((similarity, mem))
|
|
|
|
# Return top K most similar memories
|
|
similarities.sort(reverse=True, key=lambda x: x[0])
|
|
return [mem for _, mem in similarities[:k]]
|
|
|
|
async def _score_memory_importance(self, memory: Memory) -> int:
|
|
"""Score importance with related memories as context"""
|
|
|
|
related_memories = await self._get_related_memories_for_scoring(memory.description, exclude_self=memory, k=5)
|
|
|
|
prompt_context = {'character': self._get_character_prompt(),
|
|
'related_memories': "\n".join([m.description for m in related_memories]),
|
|
'memory_text': memory.description,
|
|
'memory_type': memory.memory_type}
|
|
|
|
prompt = PromptManager.get_prompt('score_importance_with_context', prompt_context)
|
|
|
|
try:
|
|
response = await self.llm.chat([{"role": "user", "content": prompt}], max_tokens=5)
|
|
score = int(''.join(filter(str.isdigit, response))[:1] or "5")
|
|
return max(1, min(10, score))
|
|
except:
|
|
return 5 # Default
|
|
|
|
async def _extract_character_from_memories(self) -> Character:
|
|
"""Extract Character info from memories using JSON"""
|
|
|
|
# Get different types of memories with targeted queries
|
|
identity_memories = await self.memory_stream.retrieve_related_memories("my name age personality traits")
|
|
relationship_memories = await self.memory_stream.retrieve_related_memories("relationship with others friends family")
|
|
goal_memories = await self.memory_stream.retrieve_related_memories("goals plans dreams wants to do")
|
|
work_memories = await self.memory_stream.retrieve_related_memories("job work occupation career")
|
|
all_memories = identity_memories + relationship_memories + goal_memories + work_memories
|
|
memory_context = "\n".join([m.description for m in all_memories])
|
|
|
|
prompt_context = {'memory_context': memory_context}
|
|
prompt, schema = PromptManager.get_prompt_with_schema('extract_character_from_memories', prompt_context)
|
|
|
|
messages: List[LLMMessage] = [{'role': 'user', 'content': prompt}]
|
|
response = await self.llm.client.get_structured_response(messages, schema)
|
|
|
|
return Character(
|
|
name=response.get("name", self.character.name),
|
|
age=response.get("age", self.character.age),
|
|
personality=response.get("personality", ""),
|
|
occupation=response.get("occupation", ""),
|
|
location=response.get("location", ""),
|
|
relationships=response.get("relationships", {}),
|
|
goals=response.get("goals", [])
|
|
)
|
|
|
|
def _get_character_prompt(self):
|
|
context = {
|
|
"character_name": self.character.name,
|
|
"character_age": self.character.age,
|
|
"character_personality": self.character.personality,
|
|
"character_occupation": self.character.occupation,
|
|
"character_location": self.character.location}
|
|
return PromptManager.get_prompt('character_summary', context)
|
|
|
|
@classmethod
|
|
async def create_from_template(cls, template: CharacterTemplate) -> Self:
|
|
"""Factory method to create CharacterAgent from YAML template"""
|
|
|
|
# Create temporary character with minimal info (will be updated from memories)
|
|
temp_character = Character(name=template.get('name', 'Unknown'))
|
|
|
|
# Create instance with LLM
|
|
instance = cls(temp_character, LLMAgent())
|
|
|
|
# Add all memories with default importance (skip scoring)
|
|
logger.info(f"Loading memories for {temp_character.name}")
|
|
|
|
# Add observations
|
|
for observation in template.get('observations', []):
|
|
await instance.memory_stream.add_memory(Memory(description=observation, memory_type='observation'))
|
|
|
|
# Add reflections
|
|
for reflection in template.get('reflections', []):
|
|
await instance.memory_stream.add_memory(Memory(description=reflection, memory_type='reflection'))
|
|
|
|
# Add plans
|
|
for plan in template.get('plans', []):
|
|
await instance.memory_stream.add_memory(Memory(description=plan, memory_type='plan'))
|
|
|
|
# create the character before we score to include the character in the prompts
|
|
# Extract character info from memories to populate Character object
|
|
logger.info(f"Creating Character...")
|
|
instance.character = await instance._extract_character_from_memories()
|
|
|
|
logger.info(f"Added {len(instance.memory_stream.memories)} memories, now scoring importance...")
|
|
|
|
# Score all memories with full context
|
|
for memory in tqdm(instance.memory_stream.memories, desc="Scoring memory importance", unit="memory"):
|
|
# Score with related context
|
|
memory.importance_score = await instance._score_memory_importance(memory)
|
|
|
|
logger.info(f"Character {instance.character.name} created successfully")
|
|
return instance
|