LivingAgents/living_agents/character_agent.py

import logging
import random
import re
from datetime import datetime
from typing import List, Self

from tqdm.asyncio import tqdm

from living_agents import MemoryStream, LLMAgent, Character, PromptManager, Memory
from living_agents.datatypes import CharacterTemplate, CharacterTrait
from llm_connector import LLMMessage

logger = logging.getLogger(__name__)


class CharacterAgent:
    """Enhanced agent with Stanford's memory architecture"""

    def __init__(self, character: Character, llm: LLMAgent):
        self.character = character
        self.llm = llm
        self.memory_stream = MemoryStream(llm)
        self.current_plan: List[str] = []

    async def initialize_memories(self):
        """Initialize agent with background memories"""
        background_facts = [
            f"My name is {self.character.name} and I am {self.character.age} years old",
            f"My personality: {self.character.personality}",
            f"My occupation: {self.character.occupation}",
            f"I live in {self.character.location}"
        ]
        for fact in background_facts:
            await self.memory_stream.add_observation(self.character, fact)
        for person, relationship in self.character.relationships.items():
            await self.memory_stream.add_observation(self.character, f"My relationship with {person}: {relationship}")

    async def perceive(self, observation: str, skip_scoring=False) -> None:
        """Add new observation to memory stream"""
        # Pass the character along with the observation, matching the other
        # add_observation call sites in this class; the importance scorer is optional.
        if skip_scoring:
            new_memory = await self.memory_stream.add_observation(self.character, observation)
        else:
            new_memory = await self.memory_stream.add_observation(self.character, observation, self._score_memory_importance)
        await self._analyze_trait_impact(new_memory)
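
    # react_to_situation below retrieves related memories, prompts the LLM in
    # character, and then feeds the exchange back into the memory stream. From
    # the loop over memories_response['memories'], the 'extract_interaction_memories'
    # structured call is assumed to return a payload shaped roughly like
    #     {"memories": ["<short first-person fact>", ...]}
    # The exact schema lives in PromptManager and is not shown in this file.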

    async def react_to_situation(self, situation: str) -> str:
        """Generate reaction based on memory and character"""
        # Retrieve relevant memories
        relevant_memories = await self.memory_stream.retrieve_related_memories(situation, k=8)
        memory_context = "\n".join([f"- {m.description}" for m in relevant_memories])
        context = {
            'character': self._get_character_prompt(),
            'character_name': self.character.name,
            'memory_context': memory_context,
            'situation': situation}
        prompt = PromptManager.get_prompt('react_to_situation', context)
        response = await self.llm.chat([{"role": "user", "content": prompt}])
        # Create new memories from the interaction
        interaction_context = {
            'situation': f'I reacted to: \n{situation}',
            'response': f'My response was: \n{response}',
        }
        prompt, schema = PromptManager.get_prompt_with_schema('extract_interaction_memories', interaction_context)
        memories_response = await self.llm.client.get_structured_response([{"role": "user", "content": prompt}], schema)
        for new_memory in memories_response['memories']:
            await self.perceive(new_memory)
        return response

    async def plan_day(self) -> List[str]:
        """Generate high-level daily plan"""
        # Retrieve relevant memories about goals, habits, schedule
        relevant_memories = await self.memory_stream.retrieve_related_memories(
            f"{self.character.name} daily routine goals schedule", k=5
        )
        memory_context = "\n".join([m.description for m in relevant_memories])
        plan_prompt = f"""You are {self.character.name}.
Background: {self.character.personality}
Occupation: {self.character.occupation}
Relevant memories:
{memory_context}
Plan your day in broad strokes (5-8 activities with times):
1)"""
        try:
            response = await self.llm.chat([{"role": "user", "content": plan_prompt}], max_tokens=300)
            # The model continues from the "1)" stub, so the whole completion is
            # kept as a single plan block rather than parsed into separate steps.
            plan_steps = [f"1){response}"] if response else ["1) Go about my daily routine"]
            # Add plan to memory
            plan_description = f"Daily plan: {'; '.join(plan_steps)}"
            await self.memory_stream.add_observation(self.character, plan_description)
            return plan_steps
        except Exception:
            logger.exception("Daily planning failed, falling back to a default plan")
            return ["1) Go about my daily routine"]

    async def get_summary(self) -> str:
        """Generate current summary based on memories and reflections"""
        reflections = [m for m in self.memory_stream.memories if m.memory_type == "reflection"]
        recent_observations = self.memory_stream.memories[-10:]
        summary_memories = reflections[-3:] + recent_observations[-5:]
        memory_context = "\n".join([m.description for m in summary_memories])
        summary_prompt = f"""Based on the following memories and reflections, provide a brief summary of who {self.character.name} is and what they care about:
{memory_context}
Summary:"""
        try:
            return await self.llm.chat([{"role": "user", "content": summary_prompt}], max_tokens=150)
        except Exception:
            logger.exception("Summary generation failed, falling back to a static description")
            return f"{self.character.name} is a {self.character.age}-year-old {self.character.occupation}."

    async def _score_memory_importance(self, memory: Memory) -> int:
        """Score importance with related memories as context"""
        related_memories = await self.memory_stream.get_related_memories_for_scoring(memory.description, exclude_self=memory, k=5)
        prompt_context = {'character_context': self._get_character_prompt(),
                          'character_name': self.character.name,
                          'related_memories': "\n".join([m.description for m in related_memories]),
                          'memory_text': memory.description}
        if memory.memory_type == 'observation':
            prompt = PromptManager.get_prompt('score_observation_importance', prompt_context)
        elif memory.memory_type == 'reflection':
            prompt = PromptManager.get_prompt('score_reflection_importance', prompt_context)
        elif memory.memory_type == 'plan':
            prompt = PromptManager.get_prompt('score_plan_importance', prompt_context)
        else:
            # Fall back to the observation prompt for any other memory type so
            # `prompt` is always defined.
            prompt = PromptManager.get_prompt('score_observation_importance', prompt_context)
        # For reflections and plans, record the related memories on the memory itself.
        if memory.memory_type in ('reflection', 'plan'):
            for rel_memory in related_memories:
                memory.related_memories.append(rel_memory)
        try:
            response = await self.llm.chat([{"role": "user", "content": prompt}], max_tokens=5)
            # Take the first number in the reply ("7", "7/10" and "10" all parse
            # correctly) and clamp it to the 1-10 range.
            match = re.search(r"\d+", response)
            score = int(match.group()) if match else 5
            return max(1, min(10, score))
        except Exception:
            logger.exception("Importance scoring failed, using default score")
            return 5  # Default
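
    # _analyze_trait_impact relies on the 'assess_trait_impact' schema. From the
    # field accesses below, each entry in response['trait_updates'] is assumed to
    # look roughly like
    #     {"trait_name": "curiosity", "description": "...", "action": "create"}
    # where the action is one of create/strengthen and anything else weakens the trait.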

    async def _analyze_trait_impact(self, memory: Memory):
        traits_summary = "\n".join([f" - {trait.strength}/10 {trait.name} ({trait.description})" for trait in self.character.traits]) if self.character.traits else "No traits yet."
        prompt_context = {'character_name': self.character.name,
                          'current_traits': traits_summary,
                          'new_observation': memory.description}
        prompt, schema = PromptManager.get_prompt_with_schema('assess_trait_impact', prompt_context)
        messages: List[LLMMessage] = [{'role': 'user', 'content': prompt}]
        response = await self.llm.client.get_structured_response(messages, schema)
        for trait_update in response['trait_updates']:
            trait_to_update = self.character.get_trait(trait_update['trait_name'], trait_update['description'])
            if trait_update['action'] in ('create', 'strengthen'):
                await self._strengthen_trait(trait_to_update)
            else:
                await self._weaken_trait(trait_to_update)
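
    # The two helpers below nudge a trait by at most one point per observation.
    # CharacterTrait.change_by_probability(steepness) is assumed to return a
    # probability in [0, 1] (it is defined in living_agents.datatypes, not shown
    # here), so repeated reinforcement is stochastic rather than guaranteed.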

    @staticmethod
    async def _strengthen_trait(trait: CharacterTrait, steepness: float = 1.0):
        if trait.strength >= 10:
            return
        if random.random() < trait.change_by_probability(steepness):
            trait.strength += 1
            trait.updated = datetime.now()

    async def _weaken_trait(self, trait: CharacterTrait, steepness: float = 1.0):
        if random.random() < trait.change_by_probability(steepness):
            trait.strength -= 1
            trait.updated = datetime.now()
            if trait.strength <= 0:
                self.character.traits.remove(trait)
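
    # _generate_character_from_memories condenses the memory stream back into a
    # Character. Judging by the .get() calls below, the
    # 'extract_character_from_memories' schema is expected to yield roughly
    #     {"name": str, "age": int, "personality": str, "occupation": str,
    #      "location": str, "relationships": {str: str}, "goals": [str]}
    # with any missing field falling back to the current character or an empty value.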

    async def _generate_character_from_memories(self) -> Character:
        """Extract Character info from memories using JSON"""
        # Get different types of memories with targeted queries
        identity_memories = await self.memory_stream.retrieve_related_memories("my name age personality traits")
        relationship_memories = await self.memory_stream.retrieve_related_memories("relationship with others friends family")
        goal_memories = await self.memory_stream.retrieve_related_memories("goals plans dreams wants to do")
        work_memories = await self.memory_stream.retrieve_related_memories("job work occupation career")
        all_memories = identity_memories + relationship_memories + goal_memories + work_memories
        memory_context = "\n".join([m.description for m in all_memories])
        prompt_context = {'memory_context': memory_context}
        prompt, schema = PromptManager.get_prompt_with_schema('extract_character_from_memories', prompt_context)
        messages: List[LLMMessage] = [{'role': 'user', 'content': prompt}]
        response = await self.llm.client.get_structured_response(messages, schema)
        return Character(
            name=response.get("name", self.character.name),
            age=response.get("age", self.character.age),
            personality=response.get("personality", ""),
            occupation=response.get("occupation", ""),
            location=response.get("location", ""),
            relationships=response.get("relationships", {}),
            goals=response.get("goals", [])
        )

    def _get_character_prompt(self):
        context = {
            "character_name": self.character.name,
            "character_age": self.character.age,
            "character_personality": self.character.personality,
            "character_occupation": self.character.occupation,
            "character_location": self.character.location}
        return PromptManager.get_prompt('character_summary', context)
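
    # create_from_template expects a CharacterTemplate mapping with (at least)
    # the keys read below; an illustrative YAML file might look like:
    #
    #     name: Ada
    #     observations:
    #       - "I repaired a bicycle for a neighbour today"
    #     reflections:
    #       - "I feel most useful when I am fixing things"
    #     plans:
    #       - "Visit the workshop tomorrow morning"
    #
    # (the example values are invented; only the key names come from this file).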

    @classmethod
    async def create_from_template(cls, template: CharacterTemplate) -> Self:
        """Factory method to create CharacterAgent from YAML template"""
        # Create temporary character with minimal info (will be updated from memories)
        temp_character = Character(name=template.get('name', 'Unknown'))
        # Create instance with LLM
        instance = cls(temp_character, LLMAgent())
        # Add all memories with default importance (skip scoring)
        logger.info(f"Loading memories for {temp_character.name}")
        # Add observations
        for observation in template.get('observations', []):
            await instance.memory_stream.add_memory(Memory(description=observation, memory_type='observation'))
        # Add reflections
        for reflection in template.get('reflections', []):
            await instance.memory_stream.add_memory(Memory(description=reflection, memory_type='reflection'))
        # Add plans
        for plan in template.get('plans', []):
            await instance.memory_stream.add_memory(Memory(description=plan, memory_type='plan'))
        # Create the character before scoring so the character can be included in the scoring prompts:
        # extract character info from memories to populate the Character object.
        logger.info("Creating Character...")
        instance.character = await instance._generate_character_from_memories()
        logger.info(f"Added {len(instance.memory_stream.memories)} memories, now scoring importance...")
        # Score all observations with importance
        observations = [memory for memory in instance.memory_stream.memories if memory.memory_type == 'observation']
        for memory in tqdm(observations, desc="Scoring memory importance", unit="memory"):
            # Score with related context
            memory.importance_score = await instance._score_memory_importance(memory)
            await instance._analyze_trait_impact(memory)
        logger.info(f"Character {instance.character.name} created successfully")
        return instance
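

# --- Illustrative usage sketch ------------------------------------------------
# A minimal way to drive the agent, assuming LLMAgent() can be constructed with
# no arguments (as in create_from_template above) and that a suitable LLM
# backend is configured. The character values are invented for the example.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        # Build a character directly; unspecified fields use the Character defaults.
        character = Character(
            name="Ada",
            age=31,
            personality="curious and practical",
            occupation="engineer",
            location="a small workshop",
        )
        agent = CharacterAgent(character, LLMAgent())
        await agent.initialize_memories()
        reaction = await agent.react_to_situation("A neighbour asks for help fixing a bike.")
        print(reaction)

    asyncio.run(_demo())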