too much

living_agents/__init__.py (new file)
@@ -0,0 +1,8 @@
from .prompt_manager import PromptManager
from .datatypes import Character, Memory
from .llmagent import LLMAgent
from .memory_stream import MemoryStream
from .character_agent import CharacterAgent
from .roleplay_system import RoleplaySystem

__all__ = ['Character', 'Memory', 'CharacterAgent', 'RoleplaySystem', 'LLMAgent', 'MemoryStream', 'PromptManager']
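For orientation, a minimal end-to-end sketch of the API re-exported here (not part of the commit; it assumes the backend environment variables read by living_agents/llmagent.py are set, and all template values are invented):

import asyncio
from living_agents import CharacterAgent

async def main():
    # Mirrors the CharacterTemplate TypedDict from living_agents.datatypes; values are made up.
    template = {
        'name': 'Sarah',
        'observations': ['I am 25 years old', 'I study art and love painting'],
        'reflections': ['I feel most at ease when drawing'],
        'plans': ['Finish my portfolio this month'],
        'yaml_file': 'sarah.yaml',
    }
    agent = await CharacterAgent.create_from_template(template)
    print(await agent.react_to_situation('A stranger asks about your art.'))

asyncio.run(main())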

living_agents/character_agent.py (new file)
@@ -0,0 +1,234 @@
import logging
import re
from typing import List, Self

from sklearn.metrics.pairwise import cosine_similarity
from tqdm.asyncio import tqdm

from living_agents import MemoryStream, LLMAgent, Character, PromptManager, Memory
from living_agents.datatypes import CharacterTemplate
from llm_connector import LLMMessage

logger = logging.getLogger(__name__)

class CharacterAgent:
    """Enhanced agent with Stanford's memory architecture"""

    def __init__(self, character: Character, llm: LLMAgent):
        self.character = character
        self.llm = llm
        self.memory_stream = MemoryStream(llm)
        self.current_plan: List[str] = []

    async def initialize_memories(self):
        """Initialize agent with background memories"""
        background_facts = [
            f"My name is {self.character.name} and I am {self.character.age} years old",
            f"My personality: {self.character.personality}",
            f"My occupation: {self.character.occupation}",
            f"I live in {self.character.location}"
        ]

        for fact in background_facts:
            await self.memory_stream.add_observation(fact)

        for person, relationship in self.character.relationships.items():
            await self.memory_stream.add_observation(f"My relationship with {person}: {relationship}")

    async def perceive(self, observation: str, skip_scoring=False) -> None:
        """Add new observation to memory stream"""
        if skip_scoring:
            await self.memory_stream.add_observation(observation)
        else:
            await self.memory_stream.add_observation(observation, self._score_memory_importance)

    async def react_to_situation(self, situation: str) -> str:
        """Generate reaction based on memory and character"""
        # Retrieve relevant memories
        relevant_memories = await self.memory_stream.retrieve_related_memories(situation, k=8)
        memory_context = "\n".join([f"- {m.description}" for m in relevant_memories])

        context = {
            'character': self._get_character_prompt(),
            'character_name': self.character.name,
            'memory_context': memory_context,
            'situation': situation}
        prompt = PromptManager.get_prompt('react_to_situation', context)

        response = await self.llm.chat([{"role": "user", "content": prompt}])

        # Create new memories from the interaction
        interaction_context = {
            'situation': f'I reacted to: \n{situation}',
            'response': f'My response was: \n{response}',
        }
        prompt, schema = PromptManager.get_prompt_with_schema('extract_interaction_memories', interaction_context)
        memories_response = await self.llm.client.get_structured_response([{"role": "user", "content": prompt}], schema)
        for new_memory in memories_response['memories']:
            await self.perceive(new_memory)

        return response

    async def plan_day(self) -> List[str]:
        """Generate a high-level daily plan"""
        # Retrieve relevant memories about goals, habits, schedule
        relevant_memories = await self.memory_stream.retrieve_related_memories(
            f"{self.character.name} daily routine goals schedule", k=5
        )

        memory_context = "\n".join([m.description for m in relevant_memories])

        plan_prompt = f"""You are {self.character.name}.
Background: {self.character.personality}
Occupation: {self.character.occupation}

Relevant memories:
{memory_context}

Plan your day in broad strokes (5-8 activities with times):
1)"""

        try:
            response = await self.llm.chat([{"role": "user", "content": plan_prompt}], max_tokens=300)
            plan_steps = [f"1){response}"] if response else ["1) Go about my daily routine"]

            # Add the plan to memory
            plan_description = f"Daily plan: {'; '.join(plan_steps)}"
            await self.memory_stream.add_observation(plan_description)

            return plan_steps
        except Exception:
            logger.exception("plan_day failed")
            return ["1) Go about my daily routine"]

    async def get_summary(self) -> str:
        """Generate current summary based on memories and reflections"""
        reflections = [m for m in self.memory_stream.memories if m.memory_type == "reflection"]
        recent_observations = self.memory_stream.memories[-10:]

        summary_memories = reflections[-3:] + recent_observations[-5:]
        memory_context = "\n".join([m.description for m in summary_memories])

        summary_prompt = f"""Based on the following memories and reflections, provide a brief summary of who {self.character.name} is and what they care about:

{memory_context}

Summary:"""

        try:
            return await self.llm.chat([{"role": "user", "content": summary_prompt}], max_tokens=150)
        except Exception:
            return f"{self.character.name} is a {self.character.age}-year-old {self.character.occupation}."

    async def _get_related_memories_for_scoring(self, memory_text: str, exclude_self=None, k=5) -> List[Memory]:
        """Get memories related to the one being scored"""
        # Get the embedding for the memory being scored
        memory_embedding = await self.llm.get_embedding(memory_text)

        # Calculate similarity to other memories
        similarities = []
        for mem in self.memory_stream.memories:
            if mem == exclude_self:
                continue

            if mem.embedding:
                similarity = cosine_similarity([memory_embedding], [mem.embedding])[0][0]
                similarities.append((similarity, mem))

        # Return the top k most similar memories
        similarities.sort(reverse=True, key=lambda x: x[0])
        return [mem for _, mem in similarities[:k]]

    async def _score_memory_importance(self, memory: Memory) -> int:
        """Score importance with related memories as context"""
        related_memories = await self._get_related_memories_for_scoring(memory.description, exclude_self=memory, k=5)

        prompt_context = {'character': self._get_character_prompt(),
                          'related_memories': "\n".join([m.description for m in related_memories]),
                          'memory_text': memory.description,
                          'memory_type': memory.memory_type}

        prompt = PromptManager.get_prompt('score_importance_with_context', prompt_context)

        try:
            response = await self.llm.chat([{"role": "user", "content": prompt}], max_tokens=5)
            # Parse the whole number, not just the first digit, so "10" is not read as 1
            match = re.search(r'\d+', response)
            score = int(match.group()) if match else 5
            return max(1, min(10, score))
        except Exception:
            return 5  # Default

    async def _extract_character_from_memories(self) -> Character:
        """Extract Character info from memories using JSON"""
        # Get different types of memories with targeted queries
        identity_memories = await self.memory_stream.retrieve_related_memories("my name age personality traits")
        relationship_memories = await self.memory_stream.retrieve_related_memories("relationship with others friends family")
        goal_memories = await self.memory_stream.retrieve_related_memories("goals plans dreams wants to do")
        work_memories = await self.memory_stream.retrieve_related_memories("job work occupation career")
        all_memories = identity_memories + relationship_memories + goal_memories + work_memories
        memory_context = "\n".join([m.description for m in all_memories])

        prompt_context = {'memory_context': memory_context}
        prompt, schema = PromptManager.get_prompt_with_schema('extract_character_from_memories', prompt_context)

        messages: List[LLMMessage] = [{'role': 'user', 'content': prompt}]
        response = await self.llm.client.get_structured_response(messages, schema)

        return Character(
            name=response.get("name", self.character.name),
            age=response.get("age", self.character.age),
            personality=response.get("personality", ""),
            occupation=response.get("occupation", ""),
            location=response.get("location", ""),
            relationships=response.get("relationships", {}),
            goals=response.get("goals", [])
        )

    def _get_character_prompt(self):
        context = {
            "character_name": self.character.name,
            "character_age": self.character.age,
            "character_personality": self.character.personality,
            "character_occupation": self.character.occupation,
            "character_location": self.character.location}
        return PromptManager.get_prompt('character_summary', context)

    @classmethod
    async def create_from_template(cls, template: CharacterTemplate) -> Self:
        """Factory method to create a CharacterAgent from a YAML template"""
        # Create a temporary character with minimal info (updated from memories below)
        temp_character = Character(name=template.get('name', 'Unknown'))

        # Create instance with LLM
        instance = cls(temp_character, LLMAgent())

        # Add all memories with default importance (skip scoring for now)
        logger.info(f"Loading memories for {temp_character.name}")

        # Add observations
        for observation in template.get('observations', []):
            await instance.memory_stream.add_memory(Memory(description=observation, memory_type='observation'))

        # Add reflections
        for reflection in template.get('reflections', []):
            await instance.memory_stream.add_memory(Memory(description=reflection, memory_type='reflection'))

        # Add plans
        for plan in template.get('plans', []):
            await instance.memory_stream.add_memory(Memory(description=plan, memory_type='plan'))

        # Extract character info from memories before scoring, so the character
        # summary can be included in the scoring prompts
        logger.info("Creating Character...")
        instance.character = await instance._extract_character_from_memories()

        logger.info(f"Added {len(instance.memory_stream.memories)} memories, now scoring importance...")

        # Score all memories with full context
        for memory in tqdm(instance.memory_stream.memories, desc="Scoring memory importance", unit="memory"):
            memory.importance_score = await instance._score_memory_importance(memory)

        logger.info(f"Character {instance.character.name} created successfully")
        return instance

living_agents/datatypes.py (new file)
@@ -0,0 +1,46 @@
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Literal, TypedDict
from datetime import datetime
from uuid import uuid4


class CharacterTemplate(TypedDict):
    name: str
    observations: List[str]
    reflections: List[str]
    plans: List[str]
    yaml_file: str


@dataclass
class Memory:
    """A single memory object with Stanford's architecture"""
    description: str
    creation_time: datetime = field(default_factory=datetime.now)
    last_accessed: datetime = field(default_factory=datetime.now)
    importance_score: int = 5  # 1-10 scale
    embedding: Optional[List[float]] = None
    memory_type: Literal["observation", "reflection", "plan"] = "observation"
    related_memories: List[int] = field(default_factory=list)  # IDs of supporting memories

    def __post_init__(self):
        if self.last_accessed is None:
            self.last_accessed = self.creation_time


@dataclass
class Character:
    name: str  # Still required
    age: Optional[int] = None
    personality: str = ""
    occupation: str = ""
    location: str = ""
    relationships: Dict[str, str] = field(default_factory=dict)
    goals: List[str] = field(default_factory=list)
    _id: str = field(default_factory=lambda: str(uuid4())[:8])

    def __hash__(self):
        return hash(self._id)

    def __eq__(self, other):
        return isinstance(other, Character) and self._id == other._id
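A quick sketch of constructing these dataclasses directly (invented values, not part of the commit):

from living_agents.datatypes import Character, Memory

sarah = Character(name='Sarah', age=25, occupation='art student',
                  relationships={'Tom': 'childhood friend'})
seed = Memory(description='My name is Sarah and I am 25 years old',
              importance_score=9, memory_type='observation')
# Characters hash and compare by the generated _id, so they work as dict keys
# (RoleplaySystem.agents relies on this).
print(sarah in {sarah: 'agent'})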

living_agents/llmagent.py (new file)
@@ -0,0 +1,59 @@
import os
import logging
from typing import List, Dict

from llm_connector import LLMClient, LLMBackend

logger = logging.getLogger(__name__)


class LLMAgent:
    client: LLMClient

    def __init__(self, temperature: float = 0.8):
        # TODO: pass temperature through to the client
        agent_backend: LLMBackend = {
            'base_url': os.environ['AGENT_BASE_URL'],
            'api_token': os.environ['AGENT_API_TOKEN'],
            'model': os.environ['AGENT_MODEL']}
        embedding_backend: LLMBackend = {
            'base_url': os.environ['EMBEDDING_BASE_URL'],
            'api_token': os.environ['EMBEDDING_API_TOKEN'],
            'model': os.environ['EMBEDDING_MODEL']}
        self.client = LLMClient(agent_backend, embedding_backend)
        self.temperature = temperature

    async def chat(self, messages: List[Dict[str, str]], max_tokens: int = 200) -> str:
        logger.info('Chat')
        try:
            response = ''
            async for chunk in self.client.get_response(messages, stream=False):  # type: ignore
                if 'content' in chunk:
                    response += chunk['content']
            return response.strip()
        except Exception as e:
            return f"[LLM Error: {str(e)}]"

    async def get_embedding(self, text: str) -> List[float]:
        """Get embedding for memory relevance scoring"""
        try:
            return await self.client.get_embedding(text)
        except Exception as e:
            logger.error(f"Embedding error: {e}")
            return [0.0] * 1536  # Default embedding size
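LLMAgent reads its two backends from the environment; a local-testing sketch of the six variables it expects (endpoint and model names are placeholders, not part of the commit):

import os

for key, value in {
    'AGENT_BASE_URL': 'http://localhost:8000/v1',
    'AGENT_API_TOKEN': 'sk-placeholder',
    'AGENT_MODEL': 'chat-model-name',
    'EMBEDDING_BASE_URL': 'http://localhost:8001/v1',
    'EMBEDDING_API_TOKEN': 'sk-placeholder',
    'EMBEDDING_MODEL': 'embedding-model-name',
}.items():
    os.environ.setdefault(key, value)  # placeholder values for local testing only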

living_agents/memory_stream.py (new file)
@@ -0,0 +1,123 @@
import logging
from collections.abc import Callable, Awaitable
from typing import List, Optional
from datetime import datetime

from sklearn.metrics.pairwise import cosine_similarity
from living_agents import LLMAgent, Memory, PromptManager
from llm_connector import LLMMessage

logger = logging.getLogger(__name__)


class MemoryStream:
    """Stanford's memory architecture with observation, reflection, and planning"""

    def __init__(self, llm_agent: LLMAgent):
        self.memories: List[Memory] = []
        self.llm = llm_agent
        self.importance_threshold = 150  # Reflection trigger threshold
        self.recent_importance_sum = 0

    async def add_memory(self, memory: Memory):
        memory.embedding = await self.llm.get_embedding(memory.description)
        self.memories.append(memory)

    async def add_observation(self, description: str, scoring_func: Optional[Callable[[Memory], Awaitable[int]]] = None) -> Memory:
        """Add a new observation with importance scoring"""
        memory = Memory(
            description=description,
            creation_time=datetime.now(),
            last_accessed=datetime.now(),
            importance_score=5,
            memory_type="observation"
        )

        if scoring_func:
            memory.importance_score = await scoring_func(memory)

        # Get embedding for retrieval
        memory.embedding = await self.llm.get_embedding(description)

        self.memories.append(memory)

        # Track for reflection trigger
        self.recent_importance_sum += memory.importance_score
        logger.debug(f"Recent importance sum: {self.recent_importance_sum}")

        # Trigger reflection if the threshold is exceeded
        if self.recent_importance_sum >= self.importance_threshold:
            logger.info("Reflection triggered.")
            await self._generate_reflections()
            self.recent_importance_sum = 0

        return memory

    async def _generate_reflections(self):
        """Generate high-level reflections from recent memories"""
        # Get recent observation memories
        recent_memories = [m for m in self.memories[-20:] if m.memory_type == "observation"]

        if len(recent_memories) < 3:
            return

        # Number the observations so the model can cite them as evidence
        memory_descriptions = "\n".join([f"{i + 1}. {m.description}" for i, m in enumerate(recent_memories)])

        prompt, schema = PromptManager.get_prompt_with_schema('generate_reflection', {'recent_observations': memory_descriptions})

        messages: List[LLMMessage] = [{'role': 'user', 'content': prompt}]
        response = await self.llm.client.get_structured_response(messages, schema)

        # Create reflection memories from the structured response
        for item in response.get('reflections', []):
            insight = item.get('insight', '').strip()
            if len(insight) > 10:
                reflection = Memory(
                    description=f"Reflection: {insight}",
                    creation_time=datetime.now(),
                    last_accessed=datetime.now(),
                    importance_score=item.get('importance', 7),  # Reflections are generally important
                    memory_type="reflection",
                    embedding=await self.llm.get_embedding(insight)
                )
                self.memories.append(reflection)

    async def retrieve_related_memories(self, query: str, k: int = 10) -> List[Memory]:
        """Retrieve relevant memories using recency, importance, relevance"""
        if not self.memories:
            return []

        query_embedding = await self.llm.get_embedding(query)
        current_time = datetime.now()
        scores = []

        for i, memory in enumerate(self.memories):
            # Calculate recency (exponential decay) before touching last_accessed,
            # otherwise the decay would always be computed against "now"
            hours_since_accessed = (current_time - memory.last_accessed).total_seconds() / 3600
            recency = 0.995 ** hours_since_accessed

            # Update last accessed
            memory.last_accessed = current_time

            # Importance (already scored 1-10)
            importance = memory.importance_score / 10.0

            # Relevance (cosine similarity)
            if memory.embedding and query_embedding:
                relevance = cosine_similarity([query_embedding], [memory.embedding])[0][0]
            else:
                relevance = 0.0

            # Combined score (equal weighting as in the Stanford paper)
            score = recency + importance + relevance
            scores.append((score, i, memory))

        # Sort by score and return the top k
        scores.sort(reverse=True, key=lambda x: x[0])
        return [memory for _, _, memory in scores[:k]]
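For intuition, a toy run of the retrieval arithmetic above (numbers invented; the three components are equally weighted, so the maximum score is 3.0):

hours_since_accessed = 12                  # half a day since last access
recency = 0.995 ** hours_since_accessed    # ~0.94 after exponential decay
importance = 8 / 10.0                      # importance_score of 8
relevance = 0.71                           # cosine similarity to the query
score = recency + importance + relevance
print(round(score, 2))                     # ~2.45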

living_agents/prompt_manager.py (new file)
@@ -0,0 +1,254 @@
import re
import json
from pathlib import Path
from typing import Dict, Any, Set, Optional, Tuple
import logging

logger = logging.getLogger(__name__)


class PromptManager:
    """Singleton class to manage prompt templates and JSON schemas"""

    _instance: Optional['PromptManager'] = None
    _initialized: bool = False

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        if not self._initialized:
            self.prompts: Dict[str, str] = {}
            self.schemas: Dict[str, Dict[str, Any]] = {}
            self.prompt_variables: Dict[str, Set[str]] = {}
            self._load_all_prompts()
            PromptManager._initialized = True

    def _load_all_prompts(self):
        """Load all markdown files and corresponding JSON schemas from the prompts folder"""
        prompts_dir = Path(__file__).parent / 'prompts'

        if not prompts_dir.exists():
            logger.warning(f"Prompts directory not found: {prompts_dir}")
            prompts_dir.mkdir(parents=True, exist_ok=True)
            return

        logger.info(f"Loading prompts and schemas from {prompts_dir}")

        # Load all .md files
        for md_file in prompts_dir.glob("*.md"):
            prompt_name = md_file.stem  # filename without extension

            try:
                # Load the prompt template
                with open(md_file, 'r', encoding='utf-8') as f:
                    content = f.read().strip()

                # Extract variables from {{variable}} patterns
                variables = self._extract_variables(content)

                self.prompts[prompt_name] = content
                self.prompt_variables[prompt_name] = variables

                # Look for a corresponding JSON schema file
                schema_file = md_file.with_suffix('.json')
                if schema_file.exists():
                    try:
                        with open(schema_file, 'r', encoding='utf-8') as f:
                            schema = json.load(f)

                        self.schemas[prompt_name] = schema
                        logger.debug(f"Loaded prompt '{prompt_name}' with schema and variables: {variables}")

                    except json.JSONDecodeError as e:
                        logger.error(f"Invalid JSON schema in {schema_file}: {e}")

                else:
                    logger.debug(f"Loaded prompt '{prompt_name}' (no schema) with variables: {variables}")

            except Exception as e:
                logger.error(f"Error loading prompt file {md_file}: {e}")

        logger.info(f"Loaded {len(self.prompts)} prompt templates, {len(self.schemas)} with schemas")

    def _extract_variables(self, template: str) -> Set[str]:
        """Extract all {{variable}} placeholders from the template"""
        pattern = r'\{\{(\w+)\}\}'
        variables = set(re.findall(pattern, template))
        return variables

    def _validate_context(self, prompt_name: str, context: Dict[str, Any]) -> None:
        """Validate that all required variables are provided"""
        if prompt_name not in self.prompt_variables:
            raise ValueError(f"Unknown prompt: '{prompt_name}'")

        required_vars = self.prompt_variables[prompt_name]
        provided_vars = set(context.keys())

        missing_vars = required_vars - provided_vars
        if missing_vars:
            raise ValueError(
                f"Missing required variables for prompt '{prompt_name}': {missing_vars}. "
                f"Required: {required_vars}, Provided: {provided_vars}"
            )

        # Warn about extra variables (not an error, but might indicate mistakes)
        extra_vars = provided_vars - required_vars
        if extra_vars:
            logger.warning(f"Extra variables provided for prompt '{prompt_name}': {extra_vars}")

    def _fill_template(self, template: str, context: Dict[str, Any]) -> str:
        """Fill the template with context variables"""
        result = template

        for key, value in context.items():
            placeholder = f"{{{{{key}}}}}"  # {{key}}
            result = result.replace(placeholder, str(value))

        return result

    @classmethod
    def get_prompt(cls, prompt_name: str, context: Optional[Dict[str, Any]] = None) -> str:
        """
        Get a processed prompt with variables filled in

        Args:
            prompt_name: Name of the prompt template (filename without .md)
            context: Dictionary of variables to fill in the template

        Returns:
            Processed prompt string

        Raises:
            ValueError: If the prompt doesn't exist or required variables are missing
        """
        instance = cls()

        if prompt_name not in instance.prompts:
            available_prompts = list(instance.prompts.keys())
            raise ValueError(f"Prompt '{prompt_name}' not found. Available prompts: {available_prompts}")

        context = context or {}

        # Validate that all required variables are provided
        instance._validate_context(prompt_name, context)

        # Fill the template
        template = instance.prompts[prompt_name]
        processed_prompt = instance._fill_template(template, context)

        return processed_prompt

    @classmethod
    def get_schema(cls, prompt_name: str) -> Optional[Dict[str, Any]]:
        """
        Get the JSON schema for a prompt if it exists

        Args:
            prompt_name: Name of the prompt template

        Returns:
            JSON schema dictionary, or None if no schema exists
        """
        instance = cls()

        if prompt_name not in instance.prompts:
            raise ValueError(f"Prompt '{prompt_name}' not found")

        return instance.schemas.get(prompt_name)

    @classmethod
    def has_schema(cls, prompt_name: str) -> bool:
        """Check if a prompt has a JSON schema"""
        instance = cls()
        return prompt_name in instance.schemas

    @classmethod
    def get_prompt_with_schema(cls, prompt_name: str, context: Optional[Dict[str, Any]] = None) -> Tuple[str, Optional[Dict[str, Any]]]:
        """
        Get both the processed prompt and its schema (if available)

        Returns:
            Tuple of (prompt_string, schema_dict_or_None)
        """
        prompt = cls.get_prompt(prompt_name, context)
        schema = cls.get_schema(prompt_name)

        return prompt, schema

    @classmethod
    def list_prompts(cls) -> Dict[str, Dict[str, Any]]:
        """
        List all available prompts with their info

        Returns:
            Dictionary mapping prompt names to their info (variables, has_schema)
        """
        instance = cls()

        result = {}
        for prompt_name in instance.prompts:
            result[prompt_name] = {
                'variables': instance.prompt_variables[prompt_name],
                'has_schema': prompt_name in instance.schemas,
                'variable_count': len(instance.prompt_variables[prompt_name])
            }

        return result

    @classmethod
    def reload_prompts(cls):
        """Reload all prompt templates and schemas (useful for development)"""
        if cls._instance:
            cls._instance._load_all_prompts()
            logger.info("Prompts and schemas reloaded")

    @classmethod
    def get_prompt_info(cls, prompt_name: str) -> Dict[str, Any]:
        """
        Get detailed information about a specific prompt

        Returns:
            Dictionary with prompt template, schema, and required variables
        """
        instance = cls()

        if prompt_name not in instance.prompts:
            raise ValueError(f"Prompt '{prompt_name}' not found")

        info = {
            'name': prompt_name,
            'template': instance.prompts[prompt_name],
            'variables': instance.prompt_variables[prompt_name],
            'variable_count': len(instance.prompt_variables[prompt_name]),
            'has_schema': prompt_name in instance.schemas
        }

        if prompt_name in instance.schemas:
            info['schema'] = instance.schemas[prompt_name]

        return info


# Module-level convenience functions
def get_prompt(prompt_name: str, context: Optional[Dict[str, Any]] = None) -> str:
    """Convenience function to get a processed prompt"""
    return PromptManager.get_prompt(prompt_name, context)


def get_prompt_with_schema(prompt_name: str, context: Optional[Dict[str, Any]] = None) -> Tuple[str, Optional[Dict[str, Any]]]:
    """Convenience function to get prompt and schema together"""
    return PromptManager.get_prompt_with_schema(prompt_name, context)


def get_schema(prompt_name: str) -> Optional[Dict[str, Any]]:
    """Convenience function to get just the schema"""
    return PromptManager.get_schema(prompt_name)


def has_schema(prompt_name: str) -> bool:
    """Check if a prompt has a structured output schema"""
    return PromptManager.has_schema(prompt_name)
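A usage sketch rendering the character_summary template shipped below (context values invented, not part of the commit):

from living_agents.prompt_manager import PromptManager

prompt = PromptManager.get_prompt('character_summary', {
    'character_name': 'Sarah',
    'character_age': 25,
    'character_personality': 'shy and thoughtful',
    'character_occupation': 'art student',
    'character_location': 'a cozy coffee shop',
})
print(prompt)  # -> "You are Sarah.\nAge: 25\n..."

Missing keys raise ValueError via _validate_context; extra keys only log a warning.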

living_agents/prompts/character_summary.md (new file)
@@ -0,0 +1,5 @@
You are {{character_name}}.
Age: {{character_age}}
Personality: {{character_personality}}
Occupation: {{character_occupation}}
Current location: {{character_location}}

living_agents/prompts/extract_character_from_memories.json (new file)
@@ -0,0 +1,48 @@
{
  "type": "object",
  "properties": {
    "name": {
      "type": "string",
      "description": "Character's full name"
    },
    "age": {
      "type": "integer",
      "minimum": 1,
      "maximum": 120
    },
    "personality": {
      "type": "string",
      "description": "Brief personality description based on memories"
    },
    "occupation": {
      "type": "string",
      "description": "Character's job or role"
    },
    "location": {
      "type": "string",
      "description": "Current location of the character"
    },
    "relationships": {
      "type": "object",
      "additionalProperties": {
        "type": "string",
        "description": "Relationship description"
      }
    },
    "goals": {
      "type": "array",
      "items": {
        "type": "string"
      },
      "description": "List of character goals"
    }
  },
  "required": [
    "name",
    "age",
    "personality",
    "occupation",
    "location"
  ],
  "additionalProperties": false
}

living_agents/prompts/extract_character_from_memories.md (new file)
@@ -0,0 +1,5 @@
Based on these memories about yourself, describe who you are:

{{memory_context}}

Tell me about yourself - your identity, relationships, goals, and what matters to you.

living_agents/prompts/extract_interaction_memories.json (new file)
@@ -0,0 +1,17 @@
{
  "type": "object",
  "properties": {
    "memories": {
      "type": "array",
      "items": {
        "type": "string",
        "maxLength": 100
      },
      "minItems": 1,
      "maxItems": 4
    }
  },
  "required": [
    "memories"
  ]
}

living_agents/prompts/extract_interaction_memories.md (new file)
@@ -0,0 +1,7 @@
Extract key factual memories from this interaction. Focus on actions taken, emotions felt, and important facts learned.

Situation: {{situation}}
Response: {{response}}

Extract 2-4 concise memories that capture the essence of what happened, how the character felt, and any new information learned.

living_agents/prompts/generate_reflection.json (new file)
@@ -0,0 +1,46 @@
{
  "type": "object",
  "properties": {
    "reflections": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "insight": {
            "type": "string",
            "description": "A high-level insight or reflection about the character",
            "minLength": 10,
            "maxLength": 200
          },
          "evidence_indices": {
            "type": "array",
            "items": {
              "type": "integer",
              "minimum": 1
            },
            "description": "Numbers of the observations that support this insight",
            "minItems": 1
          },
          "importance": {
            "type": "integer",
            "minimum": 6,
            "maximum": 10,
            "description": "Importance score for this reflection (6-10, as reflections are generally important)"
          }
        },
        "required": [
          "insight",
          "evidence_indices",
          "importance"
        ],
        "additionalProperties": false
      },
      "minItems": 1,
      "maxItems": 5
    }
  },
  "required": [
    "reflections"
  ],
  "additionalProperties": false
}

living_agents/prompts/generate_reflection.md (new file)
@@ -0,0 +1,17 @@
Analyze these recent observations and generate high-level insights about the character.

Recent observations:
{{recent_observations}}

Based on these observations, what insights can you infer about this character's personality, relationships, motivations, or behavioral patterns?

Focus on:

- Personality traits and characteristics
- Relationship dynamics and feelings toward others
- Motivations and underlying desires
- Behavioral patterns and habits
- Self-awareness and emotional states

For each insight, identify which specific observations support it by referencing their numbers.
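For reference, a structured response satisfying the accompanying JSON schema might look like the following Python dict (contents invented); MemoryStream._generate_reflections iterates response['reflections'] and turns each insight into a reflection memory:

response = {
    'reflections': [
        {
            'insight': 'Sarah uses drawing to manage social anxiety',
            'evidence_indices': [2, 5, 9],
            'importance': 8,
        },
    ],
}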

living_agents/prompts/react_to_situation.md (new file)
@@ -0,0 +1,17 @@
{{character}}

Relevant memories from your past:
{{memory_context}}

Current situation: {{situation}}

Respond as {{character_name}} describing what you did and how you reacted. Write in first person past tense as if this just happened to you.

Examples of the response style:

- "I looked up from my book and smiled nervously..."
- "I felt my heart race and took a deep breath before I said..."
- "I hesitated for a moment, then decided to..."

Stay completely in character and be specific about your actions, thoughts, and words.

living_agents/prompts/score_importance_with_context.md (new file)
@@ -0,0 +1,37 @@
{{character}}

Rate the importance of this memory on a scale of 1-10.

Related context from this character:
{{related_memories}}

Memory to rate: {{memory_text}}
Memory type: {{memory_type}}

Guidelines:

**Observations:**

- Core identity (name, age, physical traits): 8-9 (essential for character consistency)
- Personality traits and characteristics: 7-9 (fundamental to who they are)
- Significant relationships and emotional connections: 6-9 (defines social bonds)
- Major life events, achievements, failures: 8-10 (shapes character development)
- Skills, occupation, expertise: 6-8 (defines capabilities and role)
- Daily routines and mundane activities: 1-3 (low significance unless meaningful)
- Life-changing events, trauma, breakthroughs: 10 (transforms the character)

**Reflections:**

- Self-awareness and personality insights: 8-10 (core understanding of self)
- Understanding of relationships with others: 7-9 (social comprehension)
- Minor observations about preferences: 6-7 (useful but not critical)
- Life philosophy and values: 9-10 (guides all behavior)

**Plans:**

- Life-defining goals and dreams: 9-10 (drives major decisions)
- Important short-term objectives: 6-8 (affects immediate behavior)
- Casual wishes and minor wants: 3-5 (low priority desires)

Given the context, how important is this memory for understanding and portraying this character? Respond with only a number 1-10.

living_agents/prompts/score_memory_importance.md (new file)
@@ -0,0 +1,25 @@
Rate how important this memory would be to this specific person (1-10):

{{character_context}}

Memory: {{description}}

Consider:
- Does this relate to their personality traits?
- Does this connect to their occupation or goals?
- Would someone with this personality care deeply about this?
- Is this core identity information? (Always rate 8-9)

Examples:
- "My name is Sarah and I'm 25" = 9 (fundamental identity)
- "My personality is shy and thoughtful" = 9 (core self-knowledge)
- Art student + "saw beautiful painting" = 8
- Art student + "debugged code" = 3
- Shy person + "gave public speech" = 9
- Outgoing person + "gave public speech" = 5
- "I brushed my teeth" = 1
- "I had lunch" = 2

Return ONLY the number, no explanation.

Rating:

living_agents/roleplay_system.py (new file)
@@ -0,0 +1,91 @@
from typing import List, Dict, Literal
from living_agents import LLMAgent, Character, CharacterAgent, Memory
import logging
from datetime import datetime, timedelta

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)


class RoleplaySystem:
    agents: Dict[Character, CharacterAgent]
    global_time: datetime
    scene_state: Dict

    def __init__(self):
        self.agents = {}
        self.global_time = datetime.now()
        self.scene_state = {
            "location": "cozy coffee shop",
            "time": "afternoon",
            "atmosphere": "quiet and peaceful",
            "active_conversations": [],
            "events": []
        }

    async def setup_characters(self, characters: List[Character]):
        logger.info('Setting up characters.')

        for character in characters:
            self.agents[character] = CharacterAgent(character, LLMAgent(temperature=0.9))
            await self.agents[character].initialize_memories()

    async def get_character_response(self, character: Character, user_input: str) -> str:
        print(f"🧠 {character.name} accessing memories...")

        # Agent perceives the user interaction
        await self.agents[character].perceive(f"Someone asked me: '{user_input}'")

        # Generate response
        response = await self.agents[character].react_to_situation(user_input)
        return response

    async def character_chat(self, character_1: Character, character_2: Character, context: str) -> str:
        """Make two characters interact with each other"""
        interaction = await self.character_interaction(character_1, character_2, context)

        result = f"\n💬 **{character_1.name}**: {interaction[character_1]}\n💬 **{character_2.name}**: {interaction[character_2]}\n"
        return result

    async def advance_time(self, hours: int = 1):
        """Advance scene time and trigger agent planning"""
        self.global_time += timedelta(hours=hours)
        self.scene_state["time"] = self.global_time.strftime("%I:%M %p")

        # Each agent perceives the passage of time
        for agent in self.agents.values():
            await agent.perceive(f"Time is now {self.scene_state['time']}")

    def get_character_memories(self, character: Character, memory_type: Literal['all', 'observation', 'reflection', 'plan'] = "all") -> List[Memory]:
        memories = self.agents[character].memory_stream.memories
        if memory_type == "all":
            return memories
        return [m for m in memories if m.memory_type == memory_type]

    async def get_character_summary(self, character: Character) -> str:
        """Get an AI-generated summary of a character based on their memories"""
        summary = await self.agents[character].get_summary()

        return f"\n📝 Current summary of {character.name}:\n{summary}\n"

    async def character_interaction(self, character_1: Character, character_2: Character, context: str) -> Dict[Character, str]:
        """Handle interaction between two characters"""
        char1_agent = self.agents[character_1]
        char2_agent = self.agents[character_2]

        # Both characters observe the interaction context
        await char1_agent.perceive(f"Interacting with {character_2.name}: {context}")
        await char2_agent.perceive(f"Interacting with {character_1.name}: {context}")

        # Generate responses
        char1_response = await char1_agent.react_to_situation(f"You are talking with {character_2.name}. Context: {context}")
        char2_response = await char2_agent.react_to_situation(f"{character_1.name} said: '{char1_response}'")

        # Both remember the conversation
        await char1_agent.perceive(f"Conversation with {character_2.name}: I said '{char1_response}', they replied '{char2_response}'")
        await char2_agent.perceive(f"Conversation with {character_1.name}: They said '{char1_response}', I replied '{char2_response}'")

        return {
            character_1: char1_response,
            character_2: char2_response
        }
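Finally, a hedged sketch driving the whole system end to end (characters invented; same environment assumptions as above):

import asyncio
from living_agents import Character, RoleplaySystem

async def main():
    sarah = Character(name='Sarah', age=25, occupation='art student')
    tom = Character(name='Tom', age=27, occupation='barista')

    system = RoleplaySystem()
    await system.setup_characters([sarah, tom])

    print(await system.character_chat(sarah, tom, 'waiting in line for coffee'))
    print(await system.get_character_summary(sarah))

asyncio.run(main())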