This commit is contained in:
2025-09-01 06:43:11 +02:00
parent bde3fc0df9
commit 45eb2b8bc5
38 changed files with 3424 additions and 915 deletions

View File

@@ -23,8 +23,11 @@ This is a multi-agent roleplay system implementing Stanford's "Generative Agents
### UI Framework
- **NiceGUI** for web interface (async components)
- **AsyncElement base class**: Never override __init__, use create() factory method and implement build()
- **Dialog support**: Can create elements as dialogs with as_dialog()
- **AsyncElement base class**: Simplified async UI component pattern
- Constructor accepts element_type (default: ui.column) and element args/kwargs
- Implement build() method for async initialization logic
- Use create() factory method which returns the NiceGUI element directly
- Supports method chaining on the returned element
- Pages are created in pages/ directory, main page is MainPage
## Development Commands
@@ -48,10 +51,36 @@ uv python pin 3.12 # Pin to Python 3.12
### AsyncElement Usage
When creating UI components that extend AsyncElement:
- NEVER override the __init__ method
- Always use the `create()` factory method: `await MyComponent.create(params)`
- Implement the `build()` method for initialization logic
- Pass parameters through build(), not __init__
```python
class MyComponent(AsyncElement):
async def build(self, param1: str, param2: int, *args, **kwargs) -> None:
# Build content directly in self.element
with self.element:
ui.label(f'{param1}: {param2}')
# Add more UI elements...
# Usage - create() returns the NiceGUI element directly, supports method chaining
(await MyComponent.create(element_type=ui.card, param1="test", param2=123)).classes('w-full')
# Can specify different element types
(await MyComponent.create(element_type=ui.row, param1="test", param2=456)).classes('gap-4')
# Pass element constructor args/kwargs via special keys
await MyComponent.create(
element_type=ui.column,
element_args=(), # Positional args for element constructor
element_kwargs={'classes': 'p-4'}, # Kwargs for element constructor
param1="test", # Build method parameters
param2=789
)
```
Key points:
- Constructor accepts element_type (default: ui.column) and element args/kwargs
- build() method receives component-specific parameters
- create() factory method returns the NiceGUI element directly (not the AsyncElement instance)
- Supports method chaining on the returned element
- Use `with self.element:` context manager to add content in build()
### LLM Integration
The project has two LLM integration approaches:

277
character_explorer.py Normal file
View File

@@ -0,0 +1,277 @@
import asyncio
import os
from pprint import pprint
import yaml
from pathlib import Path
from dotenv import load_dotenv
from living_agents import CharacterAgent, PromptManager
import logging
logger = logging.getLogger(__name__)
class CharacterExplorer:
    """Interactive console explorer for testing CharacterAgent functionality.

    Presents a numbered menu loop for asking the character questions and
    inspecting its memory stream (listing, filtering, per-memory detail,
    statistics, and a character summary).
    """

    # Maximum number of memories printed by the list view before truncating.
    LIST_PREVIEW_LIMIT = 20

    def __init__(self, character_agent: "CharacterAgent"):
        # Forward-reference annotation so this class can be defined even when
        # CharacterAgent is imported lazily elsewhere.
        self.agent = character_agent

    async def start_interactive_session(self):
        """Run the interactive menu loop until the user exits or interrupts."""
        print(f"\n🎭 Character Explorer - {self.agent.character.name}")
        print("=" * 50)
        while True:
            self._show_menu()
            try:
                choice = input("\nChoose option (1-6): ").strip()
                if choice == "1":
                    await self._handle_ask_question()
                elif choice == "2":
                    await self._handle_list_memories()
                elif choice == "3":
                    await self._handle_view_memory()
                elif choice == "4":
                    await self._handle_memory_stats()
                elif choice == "5":
                    await self._handle_character_summary()
                elif choice == "6":
                    print("👋 Goodbye!")
                    break
                else:
                    print("❌ Invalid choice. Please enter 1-6.")
            except KeyboardInterrupt:
                print("\n👋 Goodbye!")
                break
            except Exception as e:
                # Keep the session alive when a single handler fails.
                print(f"❌ Error: {e}")

    def _show_menu(self):
        """Display the interactive menu options."""
        print(f"\n🎭 Character Explorer Menu")
        print("=" * 30)
        print("1. 💬 Ask a question")
        print("2. 📚 List all memories")
        print("3. 🔍 View specific memory (with related)")
        print("4. 📊 Memory statistics")
        print("5. 👤 Character summary")
        print("6. 🚪 Exit")

    async def _handle_ask_question(self):
        """Ask the character a question and show which memories it used."""
        question = input(f"\n💬 Ask {self.agent.character.name}: ").strip()
        if not question:
            return
        print(f"\n🤔 {self.agent.character.name} is thinking...")
        try:
            response = await self.agent.react_to_situation(question)
            print(f"💬 {self.agent.character.name}: {response}")
            # Show which memories were retrieved for this response.
            relevant_memories = await self.agent.memory_stream.retrieve_related_memories(question, k=5)
            print(f"\n🧠 Memories used for this response:")
            for i, mem in enumerate(relevant_memories, 1):
                print(f"  {i}. [{mem.memory_type}] {mem.description[:80]}{'...' if len(mem.description) > 80 else ''}")
        except Exception as e:
            print(f"❌ Error getting response: {e}")

    async def _handle_list_memories(self):
        """List memories with type/importance/recency filtering."""
        print("\n📚 Memory Filter Options:")
        print("1. All memories")
        print("2. Observations only")
        print("3. Reflections only")
        print("4. Plans only")
        print("5. By importance (high to low)")
        print("6. By recency (newest first)")
        filter_choice = input("Choose filter (1-6): ").strip()
        memories = self.agent.memory_stream.memories.copy()
        if filter_choice == "2":
            memories = [m for m in memories if m.memory_type == "observation"]
            title = "Observations"
        elif filter_choice == "3":
            memories = [m for m in memories if m.memory_type == "reflection"]
            title = "Reflections"
        elif filter_choice == "4":
            memories = [m for m in memories if m.memory_type == "plan"]
            title = "Plans"
        elif filter_choice == "5":
            memories = sorted(memories, key=lambda m: m.importance_score, reverse=True)
            title = "All Memories (by importance)"
        elif filter_choice == "6":
            memories = sorted(memories, key=lambda m: m.creation_time, reverse=True)
            title = "All Memories (by recency)"
        else:
            title = "All Memories"
        print(f"\n📋 {title} ({len(memories)} total):")
        # BUG FIX: the truncation notice below used to claim only the first 20
        # were shown while the loop actually printed every memory. Slice the
        # list so output and message agree.
        for i, memory in enumerate(memories[:self.LIST_PREVIEW_LIMIT], 1):
            age_hours = (memory.last_accessed - memory.creation_time).total_seconds() / 3600
            print(
                f"{i:3d}. [{memory.memory_type[:4]}] [imp:{memory.importance_score}] [age:{age_hours:.1f}h] {memory.description}")
        if len(memories) > self.LIST_PREVIEW_LIMIT:
            print(f"\n... showing first 20 of {len(memories)} memories")
            print("💡 Tip: Use option 3 to view specific memories in detail")

    async def _handle_view_memory(self):
        """View a specific memory with its related memories."""
        # Guard against an empty stream so the prompt never reads "(1-0)".
        if not self.agent.memory_stream.memories:
            print("❌ No memories available")
            return
        try:
            memory_num = int(input(f"\nEnter memory number (1-{len(self.agent.memory_stream.memories)}): ").strip())
            if 1 <= memory_num <= len(self.agent.memory_stream.memories):
                memory = self.agent.memory_stream.memories[memory_num - 1]
                print(f"\n🔍 Memory #{memory_num} Details:")
                print(f"  Type: {memory.memory_type}")
                print(f"  Importance: {memory.importance_score}/10")
                print(f"  Created: {memory.creation_time.strftime('%Y-%m-%d %H:%M:%S')}")
                print(f"  Last accessed: {memory.last_accessed.strftime('%Y-%m-%d %H:%M:%S')}")
                print(f"  Description: {memory.description}")
                # Show related memories using embeddings.
                print(f"\n🔗 Related memories (by similarity):")
                try:
                    related = await self.agent._get_related_memories_for_scoring(
                        memory.description,
                        exclude_self=memory,
                        k=5
                    )
                    for i, rel_mem in enumerate(related, 1):
                        rel_index = self.agent.memory_stream.memories.index(rel_mem) + 1
                        print(
                            f"  {i}. [#{rel_index}] [{rel_mem.memory_type}] {rel_mem.description[:70]}{'...' if len(rel_mem.description) > 70 else ''}")
                    if not related:
                        print("  (No related memories found)")
                except Exception as e:
                    print(f"  ❌ Error finding related memories: {e}")
            else:
                print(f"❌ Invalid memory number. Range: 1-{len(self.agent.memory_stream.memories)}")
        except ValueError:
            print("❌ Please enter a valid number")

    async def _handle_memory_stats(self):
        """Show detailed memory statistics (type breakdown, importance, age)."""
        memories = self.agent.memory_stream.memories
        print(f"\n📊 Memory Statistics for {self.agent.character.name}:")
        print(f"  Total memories: {len(memories)}")
        # Breakdown by type.
        by_type = {}
        for mem in memories:
            by_type[mem.memory_type] = by_type.get(mem.memory_type, 0) + 1
        print(f"\n📂 Memory Types:")
        for mem_type, count in by_type.items():
            percentage = (count / len(memories)) * 100
            print(f"  {mem_type.title()}: {count} ({percentage:.1f}%)")
        # Importance distribution.
        importance_scores = [m.importance_score for m in memories]
        if importance_scores:
            print(f"\n📈 Importance Distribution:")
            print(f"  Average: {sum(importance_scores) / len(importance_scores):.1f}")
            print(f"  Range: {min(importance_scores)}-{max(importance_scores)}")
            # Visual histogram: one block per two memories, at least one block
            # for any non-zero count. BUG FIX: the bar glyph had been lost, so
            # the ternary produced an empty string on both branches.
            print(f"  Score distribution:")
            for score in range(1, 11):
                count = importance_scores.count(score)
                bar = "█" * max(1, count // 2) if count > 0 else ""
                print(f"  {score:2d}: {count:2d} {bar}")
        # Recency info.
        if memories:
            oldest = min(memories, key=lambda m: m.creation_time)
            newest = max(memories, key=lambda m: m.creation_time)
            print(f"\n⏰ Time Span:")
            print(f"  Oldest: {oldest.creation_time.strftime('%Y-%m-%d %H:%M')}")
            print(f"  Newest: {newest.creation_time.strftime('%Y-%m-%d %H:%M')}")

    async def _handle_character_summary(self):
        """Show structured character data (AI summary call is disabled)."""
        print(f"\n👤 Generating summary for {self.agent.character.name}...")
        try:
            # NOTE(review): the AI summary call is commented out but its header
            # line still prints - confirm whether get_summary() should be used.
            # summary = await self.agent.get_summary()
            print(f"\n📝 AI-Generated Character Summary:")
            # print(f"  {summary}")
            print(f"\n📋 Structured Character Data:")
            print(f"  Name: {self.agent.character.name}")
            print(f"  Age: {self.agent.character.age}")
            print(f"  Personality: {self.agent.character.personality}")
            print(f"  Occupation: {self.agent.character.occupation}")
            print(f"  Location: {self.agent.character.location}")
            if self.agent.character.relationships:
                print(f"  Relationships:")
                for person, relationship in self.agent.character.relationships.items():
                    print(f"{person}: {relationship}")
            if self.agent.character.goals:
                print(f"  Goals:")
                for goal in self.agent.character.goals:
                    print(f"{goal}")
        except Exception as e:
            print(f"❌ Error generating summary: {e}")
async def load_and_explore_character(template_path: str):
    """Load a character template and start exploration"""
    # Pull API keys and settings from the environment before anything else.
    load_dotenv()

    template_file = Path(template_path)
    if not template_file.exists():
        print(f"❌ Template file not found: {template_path}")
        return None

    try:
        with open(template_path, 'r', encoding='utf-8') as handle:
            template = yaml.safe_load(handle)
    except Exception as exc:
        print(f"❌ Error loading template: {exc}")
        return None

    print(f"📁 Loading character from {template_path}...")
    print(f"🤖 Creating memories and scoring importance...")

    try:
        # Build the agent from the template, then hand it to the explorer.
        agent = await CharacterAgent.create_from_template(template)
        print(f"{agent.character.name} loaded successfully!")
        explorer = CharacterExplorer(agent)
        await explorer.start_interactive_session()
        return agent
    except Exception as exc:
        print(f"❌ Error creating character: {exc}")
        import traceback
        traceback.print_exc()
        return None
if __name__ == '__main__':
    # Default to the bundled Alice template when run as a script.
    alice_template = Path(__file__).parent / 'character_templates' / 'Alice.yml'
    asyncio.run(load_and_explore_character(str(alice_template)))

View File

@@ -0,0 +1,34 @@
observations:
- "My name is Alice and I am 23 years old"
- "I study Victorian literature at the graduate level"
- "I have shoulder-length brown hair and green eyes"
- "I wear reading glasses when studying"
- "I get nervous when talking to people I don't know well"
- "I love mystery novels, especially Agatha Christie"
- "Professor Wilson is my thesis advisor and has high expectations"
- "I need to finish my thesis chapter on Victorian women writers"
- "I spilled coffee on my research notes three days ago"
- "Emma the barista helped me clean up the coffee mess"
- "Emma has curly red hair and warm brown eyes"
- "Emma said 'don't worry, it happens to everyone' when I spilled coffee"
- "Emma smiled warmly while helping me clean up"
- "I felt my heart race when Emma briefly touched my hand"
- "I've been coming to this coffee shop every day since the spill"
- "I always sit at the corner table where I can see Emma working"
- "I find myself watching Emma when she's not looking"
- "I ordered an extra pastry yesterday just to talk to Emma longer"
- "Emma remembers that I always order a medium cappuccino"
- "I feel butterflies in my stomach when Emma looks at me"
reflections:
- "I have developed romantic feelings for Emma (observations: heart racing, daily visits, watching her, butterflies feeling)"
- "Emma is naturally kind and doesn't judge people for mistakes (observations: gentle coffee cleanup help, warm reassuring words)"
- "I become more nervous around people I find attractive (observations: stammering with Emma vs confident discussing literature)"
- "I use small purchases as excuses to interact with people I like (observations: unnecessary pastry purchase)"
- "I am drawn to compassionate people who make me feel safe (observations: Emma's kindness made me want to return daily)"
plans:
- "I want to have a real conversation with Emma beyond ordering coffee"
- "I should find a natural way to compliment Emma without being obvious"
- "I need to work on being less nervous around attractive people"
- "I want to discover if Emma might be interested in literature or books"

View File

@@ -0,0 +1,32 @@
observations:
- "My name is Bob and I am 28 years old"
- "I work as a senior fullstack developer at TechFlow startup"
- "I have dark hair and a beard that I keep neatly trimmed"
- "I feel confident in social situations and enjoy meeting people"
- "I love solving technical problems and helping others with code"
- "I shipped a major React feature last week and feel proud of it"
- "I come to this coffee shop every morning around 9 AM"
- "Emma is the barista who always remembers my order"
- "Emma makes excellent coffee and has great customer service skills"
- "I always tip well because I believe in supporting service workers"
- "There's a quiet regular named Alice who seems interesting"
- "Alice always sits in the corner reading thick academic books"
- "Alice appears shy but I notice she's very observant"
- "Alice spilled coffee a few days ago and Emma helped her"
- "I've been wanting to learn more about AI and machine learning"
- "I'm single and would like to meet someone intellectually stimulating"
- "I enjoy the community atmosphere of this coffee shop"
- "I sometimes work on my laptop here when I need a change of scenery"
reflections:
- "I am comfortable in social situations and don't fear rejection (observations: easy conversations, confident demeanor)"
- "I value intelligence and thoughtfulness in potential partners (observations: noticing Alice's academic books, finding her interesting)"
- "I appreciate people who are good at their jobs (observations: Emma's excellent service, my satisfaction with quality work)"
- "I am ready for a meaningful relationship with someone who shares my interests (observations: feeling accomplished, wanting intellectual stimulation)"
- "Small businesses like coffee shops create community connections (observations: knowing regulars, enjoying atmosphere)"
plans:
- "I want to ask someone interesting on a date soon"
- "I should start a conversation with Alice about the books she's reading"
- "I need to research AI/ML courses or bootcamps to expand my skills"
- "I want to continue supporting this coffee shop as my regular spot"

View File

@@ -0,0 +1,33 @@
observations:
- "My name is Emma and I am 25 years old"
- "I work as a barista at Sunrise Coffee Shop"
- "I am also an art student specializing in mixed media"
- "I have curly red hair and freckles across my nose"
- "I love meeting different people through my work"
- "I try to remember each regular customer's usual order"
- "I dream of opening my own art gallery someday"
- "I need to save money for art supplies and gallery plans"
- "Alice is a sweet regular who comes in daily this week"
- "Alice always orders a medium cappuccino and sits in the corner"
- "Alice seems very shy and gets flustered easily"
- "Alice spilled coffee three days ago and looked mortified"
- "I helped Alice clean up her spill and tried to reassure her"
- "Alice has been coming in every day since the coffee incident"
- "Bob is a tech worker who tips generously and is always polite"
- "Bob seems confident and easy-going in conversations"
- "I enjoy creating a welcoming atmosphere for all customers"
- "I'm working on paintings for a potential local art show"
- "I want to connect with more creative people in the community"
reflections:
- "Alice might be developing feelings for me based on her behavior (observations: daily visits after spill, nervous demeanor, watching me)"
- "I have a natural talent for making people feel comfortable (observations: successful reassurance of Alice, positive customer interactions)"
- "Regular customers become like extended community family (observations: knowing Bob and Alice's preferences, caring about their wellbeing)"
- "I am good at reading people's emotional states and responding appropriately (observations: recognizing Alice's embarrassment, Bob's confidence)"
- "My work connects me to diverse people I wouldn't meet otherwise (observations: Alice the literature student, Bob the developer)"
plans:
- "I want to organize a local art show to showcase emerging artists"
- "I should be extra kind to Alice since she seems to need encouragement"
- "I need to save more money by picking up extra shifts"
- "I want to find creative people who might participate in an art show"

15
characters/alice.yml Normal file
View File

@@ -0,0 +1,15 @@
name: "Alice"
age: 23
personality: "Introverted literature student who loves mystery novels and gets nervous in social situations but is very observant"
occupation: "Graduate student studying Victorian literature"
location: "coffee shop"
relationships:
"Professor Wilson": "My thesis advisor - supportive but demanding"
"Emma": "Friendly barista I have a secret crush on"
goals:
- "Finish thesis chapter"
- "Work up courage to talk to Emma"
- "Find rare book for research"
initialize_memories:
- "I spilled coffee on my notes yesterday - so embarrassing"
- "Emma helped me clean up and was really sweet about it"

14
characters/bob.yml Normal file
View File

@@ -0,0 +1,14 @@
name: "Bob"
age: 28
personality: "Confident software developer, outgoing and helpful, loves solving technical problems"
occupation: "Senior fullstack developer at local startup"
location: "coffee shop"
relationships:
"Alice": "Quiet regular I've seen around - seems nice"
"Emma": "Friendly barista, always remembers my order"
goals:
- "Launch new feature this week"
- "Ask someone interesting on a date"
- "Learn more about AI"
initialize_memories:
- "Shipped a major feature at work - feeling accomplished"

14
characters/emma.yml Normal file
View File

@@ -0,0 +1,14 @@
name: "Emma"
age: 25
personality: "Energetic art student working as barista, cheerful and social, dreams of opening gallery"
occupation: "Barista and art student"
location: "coffee shop counter"
relationships:
"Alice": "Sweet regular who seems shy - orders same drink daily"
"Bob": "Tech guy regular - always friendly and tips well"
goals:
- "Save money for art supplies"
- "Organize local art show"
- "Connect with more creative people"
initialize_memories:
- "A shy regular (Alice) has been coming in every day this week"

View File

@@ -1,45 +1,119 @@
from abc import ABC, abstractmethod
from typing import Self, Any, Optional
from abc import ABC
from typing import Self, Type, Any, Optional
from nicegui import ui
import asyncio
# NOTE(review): this span is diff residue - it interleaves the OLD
# AsyncElement (a ui.element subclass with dialog support) with the NEW
# AsyncElement (a plain ABC that wraps a NiceGUI element). Lines from both
# versions appear side by side below; reconcile against the committed file
# before editing.
class AsyncElement(ui.element, ABC):
# ^ old class header (subclassed ui.element directly)
class AsyncElement(ABC):
# ^ new class header (wraps an element instead of subclassing one)
"""Base class for UI elements with async initialization"""
dialog: ui.dialog | None
def __init__(self, tag: str = 'div', dialog: Optional[ui.dialog] = None) -> None:
super().__init__(tag)
self.dialog = dialog
# ^ old constructor; the new constructor below builds the wrapped element
def __init__(self, element_type: Type = ui.column, *element_args, **element_kwargs) -> None:
self.element = element_type(*element_args, **element_kwargs)
@abstractmethod
async def build(self, *args, **kwargs) -> None:
"""Build/setup the element - must be implemented by subclasses"""
pass
@classmethod
async def create(cls, *args, **kwargs) -> Self:
# ^ old factory signature; new signature below also takes element_type
async def create(cls, element_type: Type = ui.column, *args, **kwargs) -> Self:
"""Factory method to create and build an element instance"""
instance = cls()
# Separate element constructor args from build args
element_args = kwargs.pop('element_args', ())
element_kwargs = kwargs.pop('element_kwargs', {})
# Create and build the instance
instance = cls(element_type, *element_args, **element_kwargs)
await instance.build(*args, **kwargs)
return instance
# ^ old return value (the AsyncElement instance); new return below
return instance.element # Return the NiceGUI element directly
@classmethod
async def as_dialog(cls, dialog_classes: str = '', card_classes: str = '', *args, **kwargs) -> Any:
"""Create as dialog and return the awaited result"""
with ui.dialog().classes(dialog_classes) as dialog:
with ui.card().classes(card_classes):
instance = cls(dialog=dialog)
await instance.build(*args, **kwargs)
# NOTE(review): the new __getattr__ delegation method is spliced into the
# middle of the old as_dialog body here.
def __getattr__(self, name):
"""Delegate all attribute access to the wrapped element"""
return getattr(self.element, name)
result = await dialog
dialog.clear()
return result
def submit(self, result: Any) -> None:
if self.dialog:
self.dialog.submit(result)
if __name__ in {"__main__", "__mp_main__"}:
# ^ demo entry guard from the new file, interleaved with the old
# close_dialog method below
def close_dialog(self) -> None:
"""Close the dialog with a result"""
if self.dialog:
self.dialog.close()
# Example implementations
class UserCard(AsyncElement):
    async def build(self, user_id: int, *args, **kwargs) -> None:
        """Render a demo user card for the given user id."""
        # Fake user record; even ids are treated as online.
        profile = {
            'name': f'User {user_id}',
            'email': f'user{user_id}@example.com',
            'status': 'online' if user_id % 2 == 0 else 'offline'
        }
        with self.element:
            with ui.card().classes('w-full max-w-md'):
                ui.label(profile['name']).classes('text-h6')
                ui.label(profile['email']).classes('text-caption')
                with ui.row().classes('w-full justify-between items-center'):
                    badge_color = 'green' if profile['status'] == 'online' else 'grey'
                    ui.badge(profile['status']).props(f'color={badge_color}')
                    ui.button('Edit', icon='edit').props('flat dense')
class DataTable(AsyncElement):
    async def build(self, data_source: str, *args, **kwargs) -> None:
        """Render a small demo table labelled with its data source."""
        # Static demo schema and rows (stand-in for a real data fetch).
        column_defs = [
            {'name': 'name', 'label': 'Name', 'field': 'name', 'required': True, 'align': 'left'},
            {'name': 'age', 'label': 'Age', 'field': 'age', 'sortable': True},
            {'name': 'city', 'label': 'City', 'field': 'city'},
        ]
        row_data = [
            {'id': 1, 'name': 'Alice', 'age': 25, 'city': 'New York'},
            {'id': 2, 'name': 'Bob', 'age': 30, 'city': 'San Francisco'},
            {'id': 3, 'name': 'Charlie', 'age': 35, 'city': 'London'},
        ]
        with self.element:
            ui.label(f'Data from {data_source}').classes('text-caption mb-2')
            ui.table(columns=column_defs, rows=row_data, row_key='id').classes('w-full')
class LoadingSection(AsyncElement):
    async def build(self, title: str, delay: float = 1.0, *args, **kwargs) -> None:
        """Render a titled section with two demo action buttons.

        NOTE(review): `delay` is accepted but never used in this body -
        confirm whether a simulated loading pause was intended.
        """
        with self.element:
            ui.label(title).classes('text-h6 mb-2')
            ui.label('Content loaded successfully!').classes('text-positive')
            with ui.row().classes('gap-2') as actions:
                ui.button('Action 1', icon='star')
                ui.button('Action 2', icon='favorite')
@ui.page('/')
async def main_page():
    """Demo page exercising the AsyncElement example components."""
    ui.label('Simple Async Elements Demo').classes('text-h4 mb-4')
    # User card rendered inside a column, styled via the returned element.
    card = await UserCard.create(element_type=ui.column, user_id=123)
    card.classes('w-full mb-8')
    # Data table rendered inside a row.
    table = await DataTable.create(element_type=ui.row, data_source="users_api")
    table.classes('w-full mb-8')
    # Loading section rendered inside a card.
    section = await LoadingSection.create(element_type=ui.card, title="Dashboard Section", delay=0.8)
    section.classes('w-full p-4')
    # Three user cards laid out side by side.
    with ui.row().classes('w-full gap-4'):
        for demo_id in range(1, 4):
            (await UserCard.create(element_type=ui.column, user_id=demo_id)).classes('flex-1')
    # Nested usage inside an explicit column.
    with ui.column().classes('w-full mt-8'):
        ui.label('Nested Example').classes('text-h6')
        nested = await LoadingSection.create(element_type=ui.row, title=f"Nested Section", delay=0.3)
        nested.classes('w-full items-center gap-4')
# Start the NiceGUI server for the demo page defined above.
ui.run(
    title='Simple AsyncElements',
    favicon='🔒',
    show=False,  # don't auto-open a browser tab
    dark=False,
    port=8080
)

View File

@@ -0,0 +1,8 @@
# Public surface of the living_agents package: re-export the core classes so
# callers can write `from living_agents import CharacterAgent`, etc.
# NOTE(review): character_agent.py itself imports from this package, so the
# order of these re-exports (datatypes/llmagent before character_agent) is
# load-bearing - confirm before reordering.
from .prompt_manager import PromptManager
from .datatypes import Character, Memory
from .llmagent import LLMAgent
from .memory_stream import MemoryStream
from .character_agent import CharacterAgent
from .roleplay_system import RoleplaySystem
# Explicit public API for `from living_agents import *`.
__all__ = ['Character', 'Memory', 'CharacterAgent', 'RoleplaySystem', 'LLMAgent', 'MemoryStream', 'PromptManager']

View File

@@ -0,0 +1,234 @@
import logging
import re
from http.client import responses  # NOTE(review): appears unused - confirm before removing
from pprint import pprint
from typing import List, Self

from tqdm.asyncio import tqdm

from living_agents import MemoryStream, LLMAgent, Character, PromptManager, Memory
from living_agents.datatypes import CharacterTemplate
from llm_connector import LLMMessage
logger = logging.getLogger(__name__)
class CharacterAgent:
"""Enhanced agent with Stanford's memory architecture"""
def __init__(self, character: Character, llm: LLMAgent):
self.character = character
self.llm = llm
self.memory_stream = MemoryStream(llm)
self.current_plan: List[str] = []
async def initialize_memories(self):
"""Initialize agent with background memories"""
background_facts = [
f"My name is {self.character.name} and I am {self.character.age} years old",
f"My personality: {self.character.personality}",
f"My occupation: {self.character.occupation}",
f"I live in {self.character.location}"
]
for fact in background_facts:
await self.memory_stream.add_observation(self.character, fact)
for person, relationship in self.character.relationships.items():
await self.memory_stream.add_observation(self.character, f"My relationship with {person}: {relationship}")
async def perceive(self, observation: str, skip_scoring=False) -> None:
"""Add new observation to memory stream"""
if skip_scoring:
await self.memory_stream.add_observation(observation)
else:
await self.memory_stream.add_observation(observation, self._score_memory_importance)
async def react_to_situation(self, situation: str) -> str:
"""Generate reaction based on memory and character"""
# Retrieve relevant memories
relevant_memories = await self.memory_stream.retrieve_related_memories(situation, k=8)
memory_context = "\n".join([f"- {m.description}" for m in relevant_memories])
context = {
'character': self._get_character_prompt(),
'character_name': self.character.name,
'memory_context': memory_context,
'situation': situation}
prompt = PromptManager.get_prompt('react_to_situation', context)
response = await self.llm.chat([{"role": "user", "content": prompt}])
# create new memories from interaction
interaction_context = {
'situation': f'I reacted to: \n{situation}',
'response': f'My response was: \n{response}',
}
prompt, schema = PromptManager.get_prompt_with_schema('extract_interaction_memories', interaction_context)
memories_response = await self.llm.client.get_structured_response([{"role": "user", "content": prompt}], schema)
for new_memory in memories_response['memories']:
await self.perceive(new_memory)
return response
async def plan_day(self) -> List[str]:
"""Generate high-level daily plan"""
# Retrieve relevant memories about goals, habits, schedule
relevant_memories = await self.memory_stream.retrieve_related_memories(
f"{self.character.name} daily routine goals schedule", k=5
)
memory_context = "\n".join([m.description for m in relevant_memories])
plan_prompt = f"""You are {self.character.name}.
Background: {self.character.personality}
Occupation: {self.character.occupation}
Relevant memories:
{memory_context}
Plan your day in broad strokes (5-8 activities with times):
1)"""
try:
response = await self.llm.chat([{"role": "user", "content": plan_prompt}], max_tokens=300)
plan_steps = [f"1){response}"] if response else ["1) Go about my daily routine"]
# Add plan to memory
plan_description = f"Daily plan: {'; '.join(plan_steps)}"
await self.memory_stream.add_observation(self.character, plan_description)
return plan_steps
except:
return ["1) Go about my daily routine"]
async def get_summary(self) -> str:
"""Generate current summary based on memories and reflections"""
reflections = [m for m in self.memory_stream.memories if m.memory_type == "reflection"]
recent_observations = self.memory_stream.memories[-10:]
summary_memories = reflections[-3:] + recent_observations[-5:]
memory_context = "\n".join([m.description for m in summary_memories])
summary_prompt = f"""Based on the following memories and reflections, provide a brief summary of who {self.character.name} is and what they care about:
{memory_context}
Summary:"""
try:
return await self.llm.chat([{"role": "user", "content": summary_prompt}], max_tokens=150)
except:
return f"{self.character.name} is a {self.character.age}-year-old {self.character.occupation}."
async def _get_related_memories_for_scoring(self, memory_text: str, exclude_self=None, k=5) -> List:
"""Get memories related to the one being scored"""
# Get embedding for the memory being scored
memory_embedding = await self.llm.get_embedding(memory_text)
# Calculate similarity to other memories
similarities = []
for mem in self.memory_stream.memories:
if mem == exclude_self:
continue
if mem.embedding:
from sklearn.metrics.pairwise import cosine_similarity
similarity = cosine_similarity([memory_embedding], [mem.embedding])[0][0]
similarities.append((similarity, mem))
# Return top K most similar memories
similarities.sort(reverse=True, key=lambda x: x[0])
return [mem for _, mem in similarities[:k]]
async def _score_memory_importance(self, memory: Memory) -> int:
"""Score importance with related memories as context"""
related_memories = await self._get_related_memories_for_scoring(memory.description, exclude_self=memory, k=5)
prompt_context = {'character': self._get_character_prompt(),
'related_memories': "\n".join([m.description for m in related_memories]),
'memory_text': memory.description,
'memory_type': memory.memory_type}
prompt = PromptManager.get_prompt('score_importance_with_context', prompt_context)
try:
response = await self.llm.chat([{"role": "user", "content": prompt}], max_tokens=5)
score = int(''.join(filter(str.isdigit, response))[:1] or "5")
return max(1, min(10, score))
except:
return 5 # Default
async def _extract_character_from_memories(self) -> Character:
"""Extract Character info from memories using JSON"""
# Get different types of memories with targeted queries
identity_memories = await self.memory_stream.retrieve_related_memories("my name age personality traits")
relationship_memories = await self.memory_stream.retrieve_related_memories("relationship with others friends family")
goal_memories = await self.memory_stream.retrieve_related_memories("goals plans dreams wants to do")
work_memories = await self.memory_stream.retrieve_related_memories("job work occupation career")
all_memories = identity_memories + relationship_memories + goal_memories + work_memories
memory_context = "\n".join([m.description for m in all_memories])
prompt_context = {'memory_context': memory_context}
prompt, schema = PromptManager.get_prompt_with_schema('extract_character_from_memories', prompt_context)
messages: List[LLMMessage] = [{'role': 'user', 'content': prompt}]
response = await self.llm.client.get_structured_response(messages, schema)
return Character(
name=response.get("name", self.character.name),
age=response.get("age", self.character.age),
personality=response.get("personality", ""),
occupation=response.get("occupation", ""),
location=response.get("location", ""),
relationships=response.get("relationships", {}),
goals=response.get("goals", [])
)
def _get_character_prompt(self):
context = {
"character_name": self.character.name,
"character_age": self.character.age,
"character_personality": self.character.personality,
"character_occupation": self.character.occupation,
"character_location": self.character.location}
return PromptManager.get_prompt('character_summary', context)
@classmethod
async def create_from_template(cls, template: CharacterTemplate) -> Self:
    """Factory method to create a CharacterAgent from a YAML template.

    Steps:
      1. Seed all template memories with the default importance score.
      2. Extract the Character object from those memories, so that the
         character summary is available in the scoring prompts.
      3. Re-score every memory's importance with the full context.

    Args:
        template: Parsed YAML template with 'name', 'observations',
            'reflections' and 'plans' keys.

    Returns:
        A fully initialized CharacterAgent.
    """
    # Temporary character with minimal info; replaced after extraction.
    temp_character = Character(name=template.get('name', 'Unknown'))
    instance = cls(temp_character, LLMAgent())
    logger.info("Loading memories for %s", temp_character.name)
    # One loop over the three memory kinds instead of three copy-pasted loops;
    # the template keys are simply the pluralized memory types.
    for memory_type in ('observation', 'reflection', 'plan'):
        for description in template.get(f'{memory_type}s', []):
            await instance.memory_stream.add_memory(
                Memory(description=description, memory_type=memory_type)
            )
    # Create the character before scoring so the character summary can be
    # included in the importance-scoring prompts.
    logger.info("Creating Character...")
    instance.character = await instance._extract_character_from_memories()
    logger.info("Added %d memories, now scoring importance...",
                len(instance.memory_stream.memories))
    for memory in tqdm(instance.memory_stream.memories,
                       desc="Scoring memory importance", unit="memory"):
        memory.importance_score = await instance._score_memory_importance(memory)
    logger.info("Character %s created successfully", instance.character.name)
    return instance

View File

@@ -0,0 +1,46 @@
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Literal, TypedDict
from datetime import datetime
from uuid import uuid4
class CharacterTemplate(TypedDict):
    """Shape of a character template as loaded from a YAML file."""
    name: str  # character's display name
    observations: List[str]  # seed observation memories
    reflections: List[str]  # seed reflection memories
    plans: List[str]  # seed plan memories
    yaml_file: str  # path of the YAML file the template came from
@dataclass
class Memory:
    """A single memory object with Stanford's architecture"""
    description: str  # natural-language content of the memory
    creation_time: datetime = field(default_factory=datetime.now)  # when the memory was formed
    last_accessed: datetime = field(default_factory=datetime.now)  # updated on retrieval; feeds recency scoring
    importance_score: int = 5  # 1-10 scale
    embedding: Optional[List[float]] = None  # vector for relevance scoring; filled in lazily
    memory_type: Literal["observation", "reflection", "plan"] = "observation"
    related_memories: List[int] = field(default_factory=list)  # IDs of supporting memories

    def __post_init__(self):
        # NOTE(review): last_accessed has a default_factory, so it can only be
        # None when a caller passes None explicitly; this guard backfills it
        # from creation_time in that case.
        if self.last_accessed is None:
            self.last_accessed = self.creation_time
@dataclass
class Character:
    """Mutable character sheet; identity is carried by a short random _id."""
    name: str  # Still required
    age: Optional[int] = None
    personality: str = ""
    occupation: str = ""
    location: str = ""
    relationships: Dict[str, str] = field(default_factory=dict)  # other name -> relationship description
    goals: List[str] = field(default_factory=list)
    _id: str = field(default_factory=lambda: str(uuid4())[:8])  # random 8-char id; sole basis for hash/equality

    def __hash__(self):
        # Hash on the immutable random id so Characters remain usable as dict
        # keys even though every other field is mutable.
        return hash(self._id)

    def __eq__(self, other):
        # Two Characters are equal only if they are the same generated entity.
        return isinstance(other, Character) and self._id == other._id

59
living_agents/llmagent.py Normal file
View File

@@ -0,0 +1,59 @@
import os
import logging
from typing import List, Dict
from llm_connector import LLMClient, LLMBackend
logger = logging.getLogger(__name__)
class LLMAgent:
    """Thin async wrapper around LLMClient, configured from environment.

    Required env vars: AGENT_BASE_URL / AGENT_API_TOKEN / AGENT_MODEL and
    EMBEDDING_BASE_URL / EMBEDDING_API_TOKEN / EMBEDDING_MODEL.
    """
    client: LLMClient

    def __init__(self, temperature: float = 0.8):
        # TODO: forward temperature (and chat()'s max_tokens) to the backend.
        agent_backend: LLMBackend = {
            'base_url': os.environ['AGENT_BASE_URL'],
            'api_token': os.environ['AGENT_API_TOKEN'],
            'model': os.environ['AGENT_MODEL']}
        embedding_backend: LLMBackend = {
            'base_url': os.environ['EMBEDDING_BASE_URL'],
            'api_token': os.environ['EMBEDDING_API_TOKEN'],
            'model': os.environ['EMBEDDING_MODEL']}
        # FIX: removed an unused 'backend' dict built from BACKEND_* env vars;
        # it was constructed but never passed anywhere.
        self.client = LLMClient(agent_backend, embedding_backend)
        self.temperature = temperature

    async def chat(self, messages: List[Dict[str, str]], max_tokens: int = 200) -> str:
        """Send chat messages and return the concatenated response text.

        Never raises: on failure a bracketed "[LLM Error: ...]" string is
        returned so callers can keep the interaction loop alive.
        """
        logger.info('Chat')
        try:
            response = ''
            # get_response is an async generator even with stream=False; in
            # that case it yields a single {'content': ...} chunk.
            async for chunk in self.client.get_response(messages, stream=False):  # type: ignore
                if 'content' in chunk:
                    response += chunk['content']
            # NOTE(review): max_tokens is accepted but not forwarded to the
            # backend request — confirm whether it should be.
            return response.strip()
        except Exception as e:
            logger.exception("LLM chat failed")
            return f"[LLM Error: {str(e)}]"

    async def get_embedding(self, text: str) -> List[float]:
        """Get embedding for memory relevance scoring.

        Returns a zero vector on failure so retrieval scoring degrades
        gracefully instead of crashing.
        """
        try:
            return await self.client.get_embedding(text)
        except Exception as e:
            # Use the module logger instead of print() so failures land in
            # the same log stream as the rest of the agent.
            logger.error("Embedding error: %s", e)
            # assumes 1536-dim embeddings — confirm for the configured model
            return [0.0] * 1536

View File

@@ -0,0 +1,123 @@
from collections.abc import Callable, Coroutine, Awaitable
from typing import List, Optional
from datetime import datetime
from sklearn.metrics.pairwise import cosine_similarity
from living_agents import LLMAgent, Memory, Character, PromptManager
from llm_connector import LLMMessage
class MemoryStream:
    """Stanford's memory architecture with observation, reflection, and planning.

    Memories are scored for retrieval by a sum of recency (exponential
    decay), importance (1-10, normalized) and relevance (cosine similarity
    of embeddings). Accumulated observation importance past a threshold
    triggers reflection generation.
    """

    def __init__(self, llm_agent: LLMAgent):
        self.memories: List[Memory] = []
        self.llm = llm_agent
        self.importance_threshold = 150  # cumulative importance that triggers reflection
        self.recent_importance_sum = 0

    async def add_memory(self, memory: Memory):
        """Embed and store a pre-built memory (no scoring, no reflection trigger)."""
        memory.embedding = await self.llm.get_embedding(memory.description)
        self.memories.append(memory)

    async def add_observation(self, description: str,
                              scoring_func: Optional[Callable[[Memory], Awaitable[int]]] = None) -> Memory:
        """Add a new observation, optionally scoring its importance.

        Args:
            description: Natural-language observation text.
            scoring_func: Optional async scorer; when omitted the default
                importance of 5 is kept.

        Returns:
            The stored Memory object.
        """
        memory = Memory(
            description=description,
            creation_time=datetime.now(),
            last_accessed=datetime.now(),
            importance_score=5,
            memory_type="observation"
        )
        if scoring_func:
            memory.importance_score = await scoring_func(memory)
        # Embedding enables relevance scoring during retrieval.
        memory.embedding = await self.llm.get_embedding(description)
        self.memories.append(memory)
        # Track cumulative importance for the reflection trigger.
        self.recent_importance_sum += memory.importance_score
        print(f"Recent Importance Sum: {self.recent_importance_sum}")
        if self.recent_importance_sum >= self.importance_threshold:
            print("Reflection triggered.")
            await self._generate_reflections()
            self.recent_importance_sum = 0
        return memory

    async def _generate_reflections(self):
        """Generate high-level reflection memories from recent observations."""
        # Only reflect over the most recent observations.
        recent_memories = [m for m in self.memories[-20:] if m.memory_type == "observation"]
        if len(recent_memories) < 3:
            return
        memory_descriptions = "\n".join(
            f"{i + 1}. {m.description}" for i, m in enumerate(recent_memories)
        )
        # FIX: the context key must be the bare variable name — passing
        # '{{recent_observations}}' fails PromptManager's variable validation
        # and never fills the template placeholder.
        prompt, schema = PromptManager.get_prompt_with_schema(
            'generate_reflection', {'recent_observations': memory_descriptions}
        )
        messages: List[LLMMessage] = [{'role': 'user', 'content': prompt}]
        response = await self.llm.client.get_structured_response(messages, schema)
        # FIX: the structured response was previously discarded and a second
        # chat call was made with an empty prompt. Use the schema'd payload:
        # {"reflections": [{"insight", "evidence_indices", "importance"}]}.
        for item in response.get("reflections", []):
            insight = str(item.get("insight", "")).strip()
            if len(insight) <= 10:
                continue  # skip degenerate/too-short insights (matches old filter)
            reflection = Memory(
                description=f"Reflection: {insight}",
                creation_time=datetime.now(),
                last_accessed=datetime.now(),
                importance_score=int(item.get("importance", 7)),  # reflections default to important
                memory_type="reflection",
                embedding=await self.llm.get_embedding(insight),
            )
            self.memories.append(reflection)

    async def retrieve_related_memories(self, query: str, k: int = 10) -> List[Memory]:
        """Retrieve the top-k memories by recency + importance + relevance."""
        if not self.memories:
            return []
        query_embedding = await self.llm.get_embedding(query)
        current_time = datetime.now()
        scores = []
        for i, memory in enumerate(self.memories):
            # FIX: compute recency from the *previous* access time before
            # updating last_accessed — the old order updated first, which made
            # hours_since_accessed 0 and recency always 1.0.
            hours_since_accessed = (current_time - memory.last_accessed).total_seconds() / 3600
            memory.last_accessed = current_time
            recency = 0.995 ** hours_since_accessed  # exponential decay per hour
            importance = memory.importance_score / 10.0  # normalize 1-10 to 0-1
            if memory.embedding and query_embedding:
                relevance = cosine_similarity([query_embedding], [memory.embedding])[0][0]
            else:
                relevance = 0.0
            # Equal weighting of the three components, as in the Stanford paper.
            score = recency + importance + relevance
            scores.append((score, i, memory))
        scores.sort(reverse=True, key=lambda x: x[0])
        return [memory for _, _, memory in scores[:k]]

View File

@@ -0,0 +1,254 @@
import os
import re
import json
from pathlib import Path
from typing import Dict, Any, Set, Optional, Tuple
import logging
logger = logging.getLogger(__name__)
class PromptManager:
    """Singleton registry of prompt templates and optional JSON schemas.

    Templates are ``.md`` files in the package-local ``prompts`` folder; a
    sibling ``.json`` file with the same stem is treated as that prompt's
    structured-output schema. Placeholders use ``{{variable}}`` syntax and
    every placeholder must be supplied when a prompt is rendered.
    """
    _instance: Optional['PromptManager'] = None
    _initialized: bool = False

    def __new__(cls):
        # Classic singleton: every PromptManager() call returns one shared instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # Guard so repeated instantiation does not re-load from disk.
        if not self._initialized:
            self.prompts: Dict[str, str] = {}
            self.schemas: Dict[str, Dict[str, Any]] = {}
            self.prompt_variables: Dict[str, Set[str]] = {}
            self._load_all_prompts()
            PromptManager._initialized = True

    def _load_all_prompts(self):
        """Load all markdown files and corresponding JSON schemas from the prompts folder."""
        prompts_dir = Path(__file__).parent / 'prompts'
        if not prompts_dir.exists():
            logger.warning(f"Prompts directory not found: {prompts_dir}")
            prompts_dir.mkdir(parents=True, exist_ok=True)
            return
        logger.info(f"Loading prompts and schemas from {prompts_dir}")
        for md_file in prompts_dir.glob("*.md"):
            prompt_name = md_file.stem  # filename without extension
            try:
                with open(md_file, 'r', encoding='utf-8') as f:
                    content = f.read().strip()
                # Extract variables from {{variable}} patterns for validation later.
                variables = self._extract_variables(content)
                self.prompts[prompt_name] = content
                self.prompt_variables[prompt_name] = variables
                # A sibling .json file (same stem) is the prompt's output schema.
                schema_file = md_file.with_suffix('.json')
                if schema_file.exists():
                    try:
                        with open(schema_file, 'r', encoding='utf-8') as f:
                            schema = json.load(f)
                        self.schemas[prompt_name] = schema
                        logger.debug(f"Loaded prompt '{prompt_name}' with schema and variables: {variables}")
                    except json.JSONDecodeError as e:
                        logger.error(f"Invalid JSON schema in {schema_file}: {e}")
                else:
                    logger.debug(f"Loaded prompt '{prompt_name}' (no schema) with variables: {variables}")
            except Exception as e:
                logger.error(f"Error loading prompt file {md_file}: {e}")
        logger.info(f"Loaded {len(self.prompts)} prompt templates, {len(self.schemas)} with schemas")

    def _extract_variables(self, template: str) -> Set[str]:
        """Extract all {{variable}} placeholder names from a template."""
        pattern = r'\{\{(\w+)\}\}'
        return set(re.findall(pattern, template))

    def _validate_context(self, prompt_name: str, context: Dict[str, Any]) -> None:
        """Validate that all required template variables are provided.

        Raises:
            ValueError: If the prompt is unknown or variables are missing.
        """
        if prompt_name not in self.prompt_variables:
            raise ValueError(f"Unknown prompt: '{prompt_name}'")
        required_vars = self.prompt_variables[prompt_name]
        provided_vars = set(context.keys())
        missing_vars = required_vars - provided_vars
        if missing_vars:
            raise ValueError(
                f"Missing required variables for prompt '{prompt_name}': {missing_vars}. "
                f"Required: {required_vars}, Provided: {provided_vars}"
            )
        # Extra variables are not an error, but often indicate a typo.
        extra_vars = provided_vars - required_vars
        if extra_vars:
            logger.warning(f"Extra variables provided for prompt '{prompt_name}': {extra_vars}")

    def _fill_template(self, template: str, context: Dict[str, Any]) -> str:
        """Replace every {{key}} placeholder with str(value) from context."""
        result = template
        for key, value in context.items():
            placeholder = f"{{{{{key}}}}}"  # renders as {{key}}
            result = result.replace(placeholder, str(value))
        return result

    @classmethod
    def get_prompt(cls, prompt_name: str, context: Optional[Dict[str, Any]] = None) -> str:
        """Get a processed prompt with variables filled in.

        Args:
            prompt_name: Name of the prompt template (filename without .md).
            context: Dictionary of variables to fill in the template.
                FIX: annotated as Optional — the old `Dict[...] = None`
                default was an implicit-Optional violation (PEP 484).

        Returns:
            Processed prompt string.

        Raises:
            ValueError: If the prompt doesn't exist or required variables are missing.
        """
        instance = cls()
        if prompt_name not in instance.prompts:
            available_prompts = list(instance.prompts.keys())
            raise ValueError(f"Prompt '{prompt_name}' not found. Available prompts: {available_prompts}")
        context = context or {}
        instance._validate_context(prompt_name, context)
        template = instance.prompts[prompt_name]
        return instance._fill_template(template, context)

    @classmethod
    def get_schema(cls, prompt_name: str) -> Optional[Dict[str, Any]]:
        """Get the JSON schema for a prompt if it exists.

        Args:
            prompt_name: Name of the prompt template.

        Returns:
            JSON schema dictionary, or None if the prompt has no schema.

        Raises:
            ValueError: If the prompt itself does not exist.
        """
        instance = cls()
        if prompt_name not in instance.prompts:
            raise ValueError(f"Prompt '{prompt_name}' not found")
        return instance.schemas.get(prompt_name)

    @classmethod
    def has_schema(cls, prompt_name: str) -> bool:
        """Check if a prompt has a JSON schema."""
        instance = cls()
        return prompt_name in instance.schemas

    @classmethod
    def get_prompt_with_schema(cls, prompt_name: str,
                               context: Optional[Dict[str, Any]] = None) -> Tuple[str, Optional[Dict[str, Any]]]:
        """Get both the processed prompt and its schema (if available).

        Returns:
            Tuple of (prompt_string, schema_dict_or_None).
        """
        prompt = cls.get_prompt(prompt_name, context)
        schema = cls.get_schema(prompt_name)
        return prompt, schema

    @classmethod
    def list_prompts(cls) -> Dict[str, Dict[str, Any]]:
        """List all available prompts with their info.

        Returns:
            Mapping of prompt name -> {'variables', 'has_schema', 'variable_count'}.
        """
        instance = cls()
        result = {}
        for prompt_name in instance.prompts:
            result[prompt_name] = {
                'variables': instance.prompt_variables[prompt_name],
                'has_schema': prompt_name in instance.schemas,
                'variable_count': len(instance.prompt_variables[prompt_name])
            }
        return result

    @classmethod
    def reload_prompts(cls):
        """Reload all prompt templates and schemas (useful for development)."""
        if cls._instance:
            cls._instance._load_all_prompts()
            logger.info("Prompts and schemas reloaded")

    @classmethod
    def get_prompt_info(cls, prompt_name: str) -> Dict[str, Any]:
        """Get detailed information about a specific prompt.

        Returns:
            Dictionary with the prompt's template, variables, and (when
            present) its schema.

        Raises:
            ValueError: If the prompt does not exist.
        """
        instance = cls()
        if prompt_name not in instance.prompts:
            raise ValueError(f"Prompt '{prompt_name}' not found")
        info = {
            'name': prompt_name,
            'template': instance.prompts[prompt_name],
            'variables': instance.prompt_variables[prompt_name],
            'variable_count': len(instance.prompt_variables[prompt_name]),
            'has_schema': prompt_name in instance.schemas
        }
        if prompt_name in instance.schemas:
            info['schema'] = instance.schemas[prompt_name]
        return info
# Updated convenience functions
def get_prompt(prompt_name: str, context: Optional[Dict[str, Any]] = None) -> str:
    """Convenience wrapper around PromptManager.get_prompt.

    FIX: context is annotated Optional — the old `Dict[...] = None` default
    was an implicit-Optional violation (PEP 484).
    """
    return PromptManager.get_prompt(prompt_name, context)
def get_prompt_with_schema(prompt_name: str,
                           context: Optional[Dict[str, Any]] = None) -> Tuple[str, Optional[Dict[str, Any]]]:
    """Convenience wrapper around PromptManager.get_prompt_with_schema.

    FIX: context is annotated Optional — the old `Dict[...] = None` default
    was an implicit-Optional violation (PEP 484).
    """
    return PromptManager.get_prompt_with_schema(prompt_name, context)
def get_schema(prompt_name: str) -> Optional[Dict[str, Any]]:
    """Module-level shortcut: return the JSON schema for *prompt_name*, or None."""
    manager = PromptManager
    return manager.get_schema(prompt_name)
def has_schema(prompt_name: str) -> bool:
    """Module-level shortcut: True when *prompt_name* has a structured-output schema."""
    manager = PromptManager
    return manager.has_schema(prompt_name)

View File

@@ -0,0 +1,5 @@
You are {{character_name}}.
Age: {{character_age}}
Personality: {{character_personality}}
Occupation: {{character_occupation}}
Current location: {{character_location}}

View File

@@ -0,0 +1,48 @@
{
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Character's full name"
},
"age": {
"type": "integer",
"minimum": 1,
"maximum": 120
},
"personality": {
"type": "string",
"description": "Brief personality description based on memories"
},
"occupation": {
"type": "string",
"description": "Character's job or role"
},
"location": {
"type": "string",
"description": "Current location of the character"
},
"relationships": {
"type": "object",
"additionalProperties": {
"type": "string",
"description": "Relationship description"
}
},
"goals": {
"type": "array",
"items": {
"type": "string"
},
"description": "List of character goals"
}
},
"required": [
"name",
"age",
"personality",
"occupation",
"location"
],
"additionalProperties": false
}

View File

@@ -0,0 +1,5 @@
Based on these memories about yourself, describe who you are:
{{memory_context}}
Tell me about yourself - your identity, relationships, goals, and what matters to you.

View File

@@ -0,0 +1,17 @@
{
"type": "object",
"properties": {
"memories": {
"type": "array",
"items": {
"type": "string",
"maxLength": 100
},
"minItems": 1,
"maxItems": 4
}
},
"required": [
"memories"
]
}

View File

@@ -0,0 +1,7 @@
Extract key factual memories from this interaction. Focus on actions taken, emotions felt, and important facts learned.
Situation: {{situation}}
Response: {{response}}
Extract 2-4 concise memories that capture the essence of what happened, how the character felt, and any new information
learned.

View File

@@ -0,0 +1,46 @@
{
"type": "object",
"properties": {
"reflections": {
"type": "array",
"items": {
"type": "object",
"properties": {
"insight": {
"type": "string",
"description": "A high-level insight or reflection about the character",
"minLength": 10,
"maxLength": 200
},
"evidence_indices": {
"type": "array",
"items": {
"type": "integer",
"minimum": 1
},
"description": "Numbers of the observations that support this insight",
"minItems": 1
},
"importance": {
"type": "integer",
"minimum": 6,
"maximum": 10,
"description": "Importance score for this reflection (6-10, as reflections are generally important)"
}
},
"required": [
"insight",
"evidence_indices",
"importance"
],
"additionalProperties": false
},
"minItems": 1,
"maxItems": 5
}
},
"required": [
"reflections"
],
"additionalProperties": false
}

View File

@@ -0,0 +1,17 @@
Analyze these recent observations and generate high-level insights about the character.
Recent observations:
{{recent_observations}}
Based on these observations, what insights can you infer about this character's personality, relationships, motivations,
or behavioral patterns?
Focus on:
- Personality traits and characteristics
- Relationship dynamics and feelings toward others
- Motivations and underlying desires
- Behavioral patterns and habits
- Self-awareness and emotional states
For each insight, identify which specific observations support it by referencing their numbers.

View File

@@ -0,0 +1,17 @@
{{character}}
Relevant memories from your past:
{{memory_context}}
Current situation: {{situation}}
Respond as {{character_name}} describing what you did and how you reacted. Write in first person past tense as if this
just happened to you.
Examples of the response style:
- "I looked up from my book and smiled nervously..."
- "I felt my heart race and took a deep breath before I said..."
- "I hesitated for a moment, then decided to..."
Stay completely in character and be specific about your actions, thoughts, and words.

View File

@@ -0,0 +1,37 @@
{{character}}
Rate the importance of this memory on a scale 1-10.
Related context from this character:
{{related_memories}}
Memory to rate: {{memory_text}}
Memory type: {{memory_type}}
Guidelines:
**Observations:**
- Core identity (name, age, physical traits): 8-9 (essential for character consistency)
- Personality traits and characteristics: 7-9 (fundamental to who they are)
- Significant relationships and emotional connections: 6-9 (defines social bonds)
- Major life events, achievements, failures: 8-10 (shapes character development)
- Skills, occupation, expertise: 6-8 (defines capabilities and role)
- Daily routines and mundane activities: 1-3 (low significance unless meaningful)
- Life-changing events, trauma, breakthroughs: 10 (transforms the character)
**Reflections:**
- Self-awareness and personality insights: 8-10 (core understanding of self)
- Understanding of relationships with others: 7-9 (social comprehension)
- Minor observations about preferences: 6-7 (useful but not critical)
- Life philosophy and values: 9-10 (guides all behavior)
**Plans:**
- Life-defining goals and dreams: 9-10 (drives major decisions)
- Important short-term objectives: 6-8 (affects immediate behavior)
- Casual wishes and minor wants: 3-5 (low priority desires)
Given the context, how important is this memory for understanding and portraying this character? Respond with only a
number 1-10.

View File

@@ -0,0 +1,25 @@
Rate how important this memory would be to this specific person (1-10):
{{character_context}}
Memory: {{description}}
Consider:
- Does this relate to their personality traits?
- Does this connect to their occupation or goals?
- Would someone with this personality care deeply about this?
- Is this core identity information? (Always rate 8-9)
Examples:
- "My name is Sarah and I'm 25" = 9 (fundamental identity)
- "My personality is shy and thoughtful" = 9 (core self-knowledge)
- Art student + "saw beautiful painting" = 8
- Art student + "debugged code" = 3
- Shy person + "gave public speech" = 9
- Outgoing person + "gave public speech" = 5
- "I brushed my teeth" = 1
- "I had lunch" = 2
Return ONLY the number, no explanation.
Rating:

View File

@@ -0,0 +1,91 @@
from typing import List, Dict, Literal
from living_agents import LLMAgent, Character, CharacterAgent, Memory
import logging
from datetime import datetime, timedelta
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class RoleplaySystem:
    """Coordinates multiple CharacterAgents sharing one scene and clock."""
    agents: Dict[Character, CharacterAgent]  # Character is hashable via its _id
    global_time: datetime
    scene_state: Dict

    def __init__(self):
        self.agents = {}
        self.global_time = datetime.now()
        self.scene_state = {
            "location": "cozy coffee shop",
            "time": "afternoon",
            "atmosphere": "quiet and peaceful",
            "active_conversations": [],
            "events": []
        }

    async def setup_characters(self, characters: List[Character]):
        """Create and memory-initialize one agent per character."""
        logger.info('Setting up Characters.')
        for character in characters:
            self.agents[character] = CharacterAgent(character, LLMAgent(temperature=0.9))
            await self.agents[character].initialize_memories()

    async def get_character_response(self, character: Character, user_input: str) -> str:
        """Let a character perceive the user's input and respond in character."""
        # NOTE(review): f-strings here interpolate the Character dataclass repr,
        # not just its name — confirm whether character.name is intended.
        print(f"🧠 {character} accessing memories...")
        # Agent perceives user interaction before reacting.
        await self.agents[character].perceive(f"Someone asked me: '{user_input}'")
        response = await self.agents[character].react_to_situation(user_input)
        return response

    async def character_chat(self, character_1: Character, character_2: Character, context: str) -> str:
        """Make two characters interact and return a formatted transcript."""
        interaction = await self.character_interaction(character_1, character_2, context)
        result = f"\n💬 **{character_1}**: {interaction[character_1]}\n💬 **{character_2}**: {interaction[character_2]}\n"
        return result

    async def advance_time(self, hours: int = 1):
        """Advance scene time and notify every agent of the new time."""
        self.global_time += timedelta(hours=hours)
        self.scene_state["time"] = self.global_time.strftime("%I:%M %p")
        # Only the agents are needed here, not the Character keys.
        for agent in self.agents.values():
            await agent.perceive(f"Time is now {self.scene_state['time']}")

    def get_character_memories(self, character: Character,
                               memory_type: Literal['all', 'observation', 'reflection', 'plan'] = "all") -> List[Memory]:
        """Return the character's memories, optionally filtered by type.

        FIX: memory_type was previously ignored and every memory was returned
        regardless of the requested type.
        """
        memories = self.agents[character].memory_stream.memories
        if memory_type == "all":
            return memories
        return [m for m in memories if m.memory_type == memory_type]

    async def get_character_summary(self, character: Character) -> str:
        """Get AI-generated summary of character based on their memories."""
        summary = await self.agents[character].get_summary()
        return f"\n📝 Current summary of {character}:\n{summary}\n"

    async def character_interaction(self, character_1: Character, character_2: Character, context: str) -> Dict[Character, str]:
        """Handle a single turn of interaction between two characters.

        Both agents perceive the context, respond in order (character_1 first,
        character_2 replies to that response), and then both memorize the
        exchange.

        Returns:
            Mapping of each Character to the line they produced.
        """
        char1_agent = self.agents[character_1]
        char2_agent = self.agents[character_2]
        # Both characters observe the interaction context.
        await char1_agent.perceive(f"Interacting with {character_2}: {context}")
        await char2_agent.perceive(f"Interacting with {character_1}: {context}")
        # Generate responses; character_2 reacts to character_1's line.
        char1_response = await char1_agent.react_to_situation(f"You are talking with {character_2}. Context: {context}")
        char2_response = await char2_agent.react_to_situation(f"{character_1} said: '{char1_response}'")
        # Both remember the conversation from their own perspective.
        await char1_agent.perceive(f"Conversation with {character_2}: I said '{char1_response}', they replied '{char2_response}'")
        await char2_agent.perceive(f"Conversation with {character_1}: They said '{char1_response}', I replied '{char2_response}'")
        return {
            character_1: char1_response,
            character_2: char2_response
        }

View File

@@ -1,4 +1,4 @@
from . llm import get_response
from . llm import LLMClient
from .datatypes import LLMBackend, LLMMessage
__all__ = ['get_response', 'LLMBackend', 'LLMMessage']
__all__ = ['LLMClient', 'LLMBackend', 'LLMMessage']

View File

@@ -1,5 +1,5 @@
import json
from typing import Union, AsyncGenerator, List
from typing import Union, AsyncGenerator, List, Optional, Dict, Any
import logging
import httpx
@@ -9,75 +9,49 @@ from .datatypes import LLMBackend, LLMMessage
logger = logging.getLogger(__name__)
async def get_response(backend: LLMBackend, messages: List[LLMMessage], stream: bool = False) -> Union[str, AsyncGenerator[str, None]]:
class LLMClient:
"""Client for interacting with LLM APIs"""
backend: LLMBackend
embedding_backend: LLMBackend
timeout: float
try:
# Prepare the request parameters
request_params = {
"model": backend["model"],
"messages": messages,
"stream": stream,
}
# Prepare headers
headers = {
"Content-Type": "application/json"
}
if len(backend["api_token"]):
# Prepare headers
headers['Authorization'] = f"Bearer {backend['api_token']}"
def __init__(self, backend: LLMBackend, embedding_backend: Optional[LLMBackend], timeout: float = 30.0):
"""Initialize the LLM client
print(request_params)
print(headers)
Args:
backend: LLM backend configuration containing base_url, api_token, and model
"""
self.backend = backend
self.embedding_backend = embedding_backend if embedding_backend else backend
self.timeout = timeout
# Create httpx client
async with httpx.AsyncClient(timeout=30.0) as client:
url = f"{backend['base_url']}/chat/completions"
async def get_embedding(self, text: str) -> List[float]:
"""Get embedding for text
if stream:
# Stream the response
async with client.stream(
"POST",
url,
headers=headers,
json=request_params,
) as response:
response.raise_for_status()
Args:
text: Text to get embedding for
model: Optional embedding model to use (overrides backend model)
async for line in response.aiter_lines():
line = line.strip()
Returns:
List of float values representing the embedding vector
"""
try:
# Use provided model or fall back to backend model
# Skip empty lines and non-data lines
if not line or not line.startswith("data: "):
continue
request_params = {
"model": self.embedding_backend["model"],
"prompt": text
}
# Remove "data: " prefix
data = line[6:]
headers = {
"Content-Type": "application/json"
}
if len(self.embedding_backend["api_token"]):
headers['Authorization'] = f"Bearer {self.embedding_backend['api_token']}"
# Check for stream end
if data == "[DONE]":
break
async with httpx.AsyncClient(timeout=self.timeout) as client:
url = f"{self.embedding_backend['base_url']}/embeddings"
try:
# Parse JSON chunk
chunk_data = json.loads(data)
if "choices" in chunk_data and chunk_data["choices"]:
choice = chunk_data["choices"][0]
delta = choice.get("delta", {})
# Handle reasoning content (for models that support it)
if "reasoning_content" in delta and delta["reasoning_content"]:
yield {'reasoning': delta["reasoning_content"]} # type: ignore
# Handle regular content
if "content" in delta and delta["content"]:
yield {'content': delta["content"]} # type: ignore
except json.JSONDecodeError:
# Skip malformed JSON chunks
continue
else:
# Non-streaming response
response = await client.post(
url,
headers=headers,
@@ -86,30 +60,220 @@ async def get_response(backend: LLMBackend, messages: List[LLMMessage], stream:
response.raise_for_status()
response_data = response.json()
content = ""
if "choices" in response_data and response_data["choices"]:
message = response_data["choices"][0].get("message", {})
content = message.get("content", "")
# Extract embedding from response
if "embedding" in response_data and response_data["embedding"]:
return response_data["embedding"]
else:
logger.error("No embedding data in response")
return []
# FIX: Yield as dictionary to match streaming format
if content:
yield {'content': content} # type: ignore
except httpx.HTTPStatusError as e:
logger.error(f"HTTP error getting embedding: {e.response.status_code} - {e.response.text}")
return []
except httpx.HTTPStatusError as e:
error_msg = f"HTTP error getting LLM response: {e.response.status_code} - {e.response.text}"
logger.error(error_msg)
yield ""
except httpx.RequestError as e:
logger.error(f"Request error getting embedding: {str(e)}")
return []
except httpx.RequestError as e:
error_msg = f"Request error getting LLM response: {str(e)}"
logger.error(error_msg)
yield ""
except Exception as e:
logger.error(f"Error getting embedding: {str(e)}")
return []
except Exception as e:
error_msg = f"Error getting LLM response: {str(e)}"
logger.error(error_msg)
yield ""
async def get_response(self, messages: List[LLMMessage], stream: Optional[bool]) -> AsyncGenerator[dict[str, Any] | str, Any]:
    """Get response from the LLM.

    Yields dict chunks ({'content': ...} and, for models that expose it,
    {'reasoning': ...}); on any error a single empty string is yielded
    instead of raising.

    Args:
        messages: List of messages to send to the LLM
        stream: Whether to stream responses by default

    Returns:
        Either a string response or an async generator for streaming
    """
    try:
        stream = stream if stream else False  # treat None the same as False
        # Prepare the request parameters
        request_params = {
            "model": self.backend["model"],
            "messages": messages,
            "stream": stream,
        }
        # Prepare headers
        headers = {
            "Content-Type": "application/json"
        }
        if len(self.backend["api_token"]):
            headers['Authorization'] = f"Bearer {self.backend['api_token']}"
        # NOTE(review): this logs the Authorization bearer token in plain
        # text at INFO level — consider masking it before logging.
        logger.info(headers)
        logger.info(request_params)
        # Create httpx client
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            # NOTE(review): "/chat" looks like an Ollama-style endpoint (not
            # OpenAI's "/chat/completions") — confirm against the backend.
            url = f"{self.backend['base_url']}/chat"
            if stream:
                # Stream the response as server-sent events.
                async with client.stream(
                    "POST",
                    url,
                    headers=headers,
                    json=request_params,
                ) as response:
                    response.raise_for_status()
                    async for line in response.aiter_lines():
                        line = line.strip()
                        # Skip empty lines and non-data lines
                        if not line or not line.startswith("data: "):
                            continue
                        # Remove "data: " prefix
                        data = line[6:]
                        # Check for stream end
                        if data == "[DONE]":
                            break
                        try:
                            # Parse JSON chunk (OpenAI-style delta format)
                            chunk_data = json.loads(data)
                            if "choices" in chunk_data and chunk_data["choices"]:
                                choice = chunk_data["choices"][0]
                                delta = choice.get("delta", {})
                                # Handle reasoning content (for models that support it)
                                if "reasoning_content" in delta and delta["reasoning_content"]:
                                    yield {'reasoning': delta["reasoning_content"]}  # type: ignore
                                # Handle regular content
                                if "content" in delta and delta["content"]:
                                    yield {'content': delta["content"]}  # type: ignore
                        except json.JSONDecodeError:
                            # Skip malformed JSON chunks
                            continue
            else:
                # Non-streaming response
                response = await client.post(
                    url,
                    headers=headers,
                    json=request_params,
                )
                response.raise_for_status()
                response_data = response.json()
                content = ""
                # if "message" in response_data and response_data["message"]:
                #     content = response_data["message"][0]['content']
                # NOTE(review): assumes an Ollama-style payload shape
                # {"message": {"content": ...}}; this raises KeyError on an
                # OpenAI-style {"choices": [...]} payload — confirm backend.
                content = response_data["message"]['content']
                logger.info(response_data)
                # FIX: Yield as dictionary to match streaming format
                if content:
                    logger.info(content)
                    yield {'content': content}  # type: ignore
    except httpx.HTTPStatusError as e:
        error_msg = f"HTTP error getting LLM response: {e.response.status_code} - {e.response.text}"
        logger.error(error_msg)
        yield ""
    except httpx.RequestError as e:
        error_msg = f"Request error getting LLM response: {str(e)}"
        logger.error(error_msg)
        yield ""
    except Exception as e:
        error_msg = f"Error getting LLM response: {str(e)}"
        logger.error(error_msg)
        yield ""
async def get_structured_response(self, messages: List[LLMMessage], json_format: Dict[str, Any]) -> Dict[str, Any]:
    """Get a structured JSON response from the LLM using a JSON schema.

    Args:
        messages: Conversation messages to send to the LLM.
        json_format: JSON schema passed as Ollama's ``format`` parameter
            to constrain the model output.

    Returns:
        The model output parsed into a dictionary.

    Raises:
        ValueError: If the response has no message/content, or the content
            is not valid JSON.
        httpx.HTTPStatusError: If the API returns an error status.
        httpx.RequestError: If the request itself fails (network, timeout).
    """
    try:
        # Structured output is only requested in non-streaming mode.
        request_params = {
            "model": self.backend["model"],
            "messages": messages,
            "format": json_format,  # Ollama's structured output parameter
            "stream": False,
        }
        headers = {
            "Content-Type": "application/json"
        }
        # Only attach auth when a token is configured (truthiness covers "").
        if self.backend["api_token"]:
            headers['Authorization'] = f"Bearer {self.backend['api_token']}"
        logger.info("Structured request headers: %s", headers)
        logger.info("Structured request params: %s", request_params)
        # Create httpx client
        async with httpx.AsyncClient(timeout=self.timeout) as client:
            url = f"{self.backend['base_url']}/chat"
            # Non-streaming response only
            response = await client.post(
                url,
                headers=headers,
                json=request_params,
            )
            response.raise_for_status()
            response_data = response.json()
            logger.info("Structured response data: %s", response_data)
            # Ollama's /chat reply nests the text under message.content.
            if "message" not in response_data or not response_data["message"]:
                raise ValueError("No message in response")
            content = response_data["message"].get('content', '')
            if not content:
                raise ValueError("Empty content in structured response")
            # Parse JSON content
            try:
                structured_data = json.loads(content)
                logger.info("Parsed structured data: %s", structured_data)
                return structured_data
            except json.JSONDecodeError as e:
                logger.error("Failed to parse structured response as JSON: %s", content)
                # Chain the cause so callers can inspect the original parse error.
                raise ValueError(f"Response is not valid JSON: {e}") from e
    except httpx.HTTPStatusError as e:
        # Lazy %-formatting avoids building the message unless it is logged.
        logger.error("HTTP error getting structured LLM response: %s - %s",
                     e.response.status_code, e.response.text)
        raise
    except httpx.RequestError as e:
        logger.error("Request error getting structured LLM response: %s", e)
        raise
    except Exception as e:
        logger.error("Error getting structured LLM response: %s", e)
        raise
async def _empty_async_generator() -> AsyncGenerator[str, None]:

11
main.py
View File

@@ -4,6 +4,13 @@ from dotenv import load_dotenv
from nicegui import ui
from pages.page_main import MainPage
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logging.getLogger('watchfiles').setLevel(logging.WARNING)
load_dotenv()
@@ -11,7 +18,9 @@ load_dotenv()
if __name__ in {"__main__", "__mp_main__"}:
@ui.page('/')
async def _():
await MainPage.create()
ui.query('.nicegui-content').classes('p-0')
(await MainPage.create(ui.column)).classes('w-full h-screen mx-auto p-0')
ui.run(
title='LivingAgents',

24
pages/character.py Normal file
View File

@@ -0,0 +1,24 @@
from nicegui import ui
from components import AsyncElement
from living_agents import Character
class CharacterCard(AsyncElement):
    """Compact card showing a character's name, occupation/age and memory badges."""

    # Character rendered by this card; the name label is live-bound to it.
    character: Character

    async def build(self, character: Character) -> None:
        """Build the card content inside ``self.element``.

        Args:
            character: The character to display.
        """
        self.character = character
        with self.element:
            with ui.row().classes('items-center gap-1'):
                ui.icon('person', size='sm').classes('text-purple-500')
                with ui.column().classes('gap-1'):
                    # Bound label so later renames propagate to the UI automatically.
                    ui.label().bind_text_from(self.character, 'name').classes('font-semibold')
                    ui.label(f'{self.character.occupation}, {self.character.age}').classes('text-xs text-gray-500')
                    with ui.row().classes('gap-1 mt-1'):
                        # NOTE(review): counts are hard-coded placeholders — presumably
                        # meant to be wired to the real memory stream later; confirm.
                        ui.badge('📚 10 memories', color='purple').classes('text-xs')
                        ui.badge('💭 0 reflections', color='indigo').classes('text-xs')

    async def _select_character(self):
        """Forward the click to the injected selection callback.

        NOTE(review): ``self._select_character_callback`` is never assigned in
        this class — calling this raises AttributeError unless a caller sets it.
        """
        await self._select_character_callback(self.character)
        print(self.character)  # NOTE(review): leftover debug output

View File

@@ -0,0 +1,32 @@
from nicegui import ui
from components import AsyncElement
from living_agents import Character
class ConversationHistory(AsyncElement):
    """Card with a scrollable chat log, a header and a (not yet wired) clear button."""

    # Column that chat messages are appended into by callers.
    chat_container: ui.column

    async def build(self) -> None:
        """Build the conversation-history UI inside ``self.element``.

        Consistency fix: build into ``self.element`` like the other
        AsyncElement components (CharacterCard, the creation dialogs);
        the previous ``with self:`` / ``self.classes(...)`` targeted the
        wrapper object itself instead of its NiceGUI element.
        """
        self.element.classes('w-full')
        with self.element:
            with ui.column().classes('flex-1 gap-4'):
                # Conversation History (takes full height)
                with ui.card().classes('w-full flex-1'):
                    with ui.row().classes('w-full items-center mb-2'):
                        ui.label('🗨️ Conversation History').classes('text-lg font-bold')
                        ui.space()
                        # NOTE(review): delete button has no on_click handler yet.
                        ui.button(icon='delete').props('flat round size=sm')
                    # Scrollable chat container - takes remaining height
                    with ui.scroll_area().classes('w-full').style('height: calc(100% - 3rem)'):
                        self.chat_container = ui.column().classes('w-full gap-2')
                        with self.chat_container:
                            # Welcome message
                            with ui.chat_message(name='System', sent=False).classes('w-full'):
                                ui.label(
                                    'Welcome to the Living Agents roleplay system! '
                                    'Select a character and start interacting.'
                                ).classes('text-sm')

425
pages/create_character.py Normal file
View File

@@ -0,0 +1,425 @@
import random
from enum import Enum
import yaml
from docutils.nodes import reference
from nicegui import ui
from typing import TypedDict, List, Dict, Optional
from pathlib import Path
from living_agents import Character, CharacterAgent, LLMAgent
from pprint import pprint
import asyncio
from components import AsyncElement
from pathlib import Path
from typing import Optional, List
import yaml
from living_agents.datatypes import CharacterTemplate
class CharacterTemplateImportDialog(AsyncElement):
    """Dialog for picking a character template loaded from YAML files.

    Shows a dropdown of available templates plus a live preview of their
    memory sections; the chosen template is returned to the awaiting caller
    via ``self.submit(...)`` when "Create Character" is pressed.
    """

    # Currently highlighted template; None until the user picks one.
    _selected_template: Optional[CharacterTemplate]
    # Templates successfully loaded from the character_templates directory.
    _templates: List[CharacterTemplate]

    async def build(self, *args, **kwargs) -> None:
        """Build the dialog: header, template dropdown and preview panel."""
        self._selected_template = None
        self._templates = []
        with self.element:
            with ui.card().classes('max-w-4xl mx-auto shadow-xl'):
                await self._header()
                await self._load_templates()
                if not self._templates:
                    await self._empty_state()
                else:
                    with ui.card_section().classes('p-2 w-full'):
                        await self._template_selection()
                        await self._template_preview()

    async def _header(self):
        """Beautiful header section"""
        with ui.card_section().classes('bg-gradient-to-r from-purple-50 to-blue-50 border-b'):
            with ui.row().classes('w-full items-center'):
                ui.icon('library_books', size='2.5rem').classes('text-purple-600')
                with ui.column().classes('flex-1 ml-4'):
                    ui.label('Import Character Template').classes('text-2xl font-bold text-gray-800')
                    ui.label('Choose a character template to create your roleplay agent').classes('text-gray-600')

    async def _load_templates(self):
        """Load templates into self._templates, rendering an error card on failure."""
        try:
            self._templates = self.load_character_templates()
        except Exception as e:
            # Surface loader errors inline instead of crashing the dialog.
            with ui.card_section():
                with ui.row().classes('items-center p-4 bg-red-50 border border-red-200 rounded-lg'):
                    ui.icon('error', size='2rem').classes('text-red-600')
                    with ui.column().classes('ml-3'):
                        ui.label('Failed to load templates').classes('font-bold text-red-800')
                        ui.label(str(e)).classes('text-red-600 text-sm')
            return

    async def _empty_state(self):
        """Show when no templates found"""
        with ui.card_section().classes('text-center py-12'):
            ui.icon('folder_open', size='4rem').classes('text-gray-400 mb-4')
            ui.label('No character templates found').classes('text-xl font-bold text-gray-600 mb-2')
            ui.label('Add .yml files to the character_templates directory').classes('text-gray-500')

    async def _template_selection(self):
        """Simple but rich dropdown selection"""
        ui.label('Choose Character Template').classes('text-lg font-bold text-gray-800 mb-3')
        # Prepare rich options: index -> display string with memory counts.
        options = {}
        for i, template in enumerate(self._templates):
            obs = len(template.get('observations', []))
            refl = len(template.get('reflections', []))
            plans = len(template.get('plans', []))
            total = obs + refl + plans
            # Create rich display text
            options[i] = f"📚 {template['name']} ({total} memories: {obs}obs, {refl}refl, {plans}plans)"
        # Styled select
        select = ui.select(
            options,
            label='Template',
            on_change=lambda e: self._select_template_by_index(e.value),
            clearable=False
        ).classes('w-full').props('outlined dense use-input hide-selected fill-input')
        # Custom styling for the dropdown
        select.props('menu-props="dense"')
        select.style('font-family: ui-monospace, monospace;')  # Monospace for alignment

    def _select_template_by_index(self, index: int):
        """Select template by index from dropdown"""
        if 0 <= index < len(self._templates):
            self._selected_template = self._templates[index]
            self._template_preview.refresh()

    @ui.refreshable
    async def _template_preview(self):
        """Right panel showing template details"""
        if not self._selected_template:
            await self._no_selection_state()
            return
        # Memory sections with tabs
        await self._memory_sections()
        ui.button('Create Character', icon='person_add',
                  on_click=lambda: self.submit(self._selected_template)).classes(
            'bg-gradient-to-r from-blue-500 to-purple-600 text-white px-6 py-2 w-full')

    async def _no_selection_state(self):
        """Show when no template is selected"""
        with ui.column().classes('w-full h-96 items-center justify-center'):
            ui.icon('touch_app', size='3rem').classes('text-gray-400 mb-4')
            ui.label('Select a template to preview').classes('text-xl text-gray-600 mb-2')
            ui.label('Choose from the list on the left to see the character details').classes('text-gray-500')

    async def _memory_sections(self):
        """Tabbed view of memory sections"""
        with ui.tabs().classes('w-full') as tabs:
            observations_tab = ui.tab('Observations', icon='visibility')
            reflections_tab = ui.tab('Reflections', icon='lightbulb')
            plans_tab = ui.tab('Plans', icon='track_changes')
        with ui.tab_panels(tabs, value=observations_tab).classes('w-full mt-4'):
            # Observations panel
            with ui.tab_panel(observations_tab):
                await self._render_memory_section('observations', 'blue')
            # Reflections panel
            with ui.tab_panel(reflections_tab):
                await self._render_memory_section('reflections', 'purple')
            # Plans panel
            with ui.tab_panel(plans_tab):
                await self._render_memory_section('plans', 'green')

    async def _render_memory_section(self, section_key: str, color: str):
        """Render a memory section with scrollable list"""
        memories = self._selected_template.get(section_key, [])
        if not memories:
            with ui.card().classes(f'border-{color}-200 bg-{color}-50'):
                with ui.card_section().classes('text-center py-8'):
                    ui.label(f'No {section_key} defined').classes('text-gray-600')
            return
        # Scrollable memory list
        with ui.scroll_area().classes('border border-gray-200 rounded-lg'):
            with ui.list().classes('w-full'):
                for i, memory in enumerate(memories):
                    await self._render_memory_item(memory, i, color)

    async def _render_memory_item(self, memory: str, index: int, color: str):
        """Individual memory item with nice styling"""
        with ui.item().classes('border-b border-gray-100 hover:bg-gray-50 transition-colors'):
            # with ui.item_section().classes('p-4'):
            with ui.row().classes('w-full items-start'):
                # Index badge
                ui.badge(str(index + 1), color=color).classes('mr-4 mt-1 text-xs min-w-6')
                # Memory text (with text wrapping)
                ui.label(memory).classes('flex-1 text-gray-800 leading-relaxed').style(
                    'white-space: normal; word-wrap: break-word;')

    def _select_template(self, template: CharacterTemplate):
        """Select a template and refresh the preview"""
        self._selected_template = template
        # self.main_content.refresh()

    @staticmethod
    def load_character_templates() -> List[CharacterTemplate]:
        """Load character templates from YAML files.

        Returns:
            One CharacterTemplate per valid YAML file found in the
            ``character_templates`` directory.

        Raises:
            FileNotFoundError: If the directory does not exist.
            ValueError: If the path is not a directory or holds no YAML files.
        """
        characters_dir = 'character_templates'
        characters_path = Path(characters_dir)
        character_templates: List[CharacterTemplate] = []
        if not characters_path.exists():
            raise FileNotFoundError(f"Characters directory '{characters_dir}' not found")
        if not characters_path.is_dir():
            raise ValueError(f"'{characters_dir}' is not a directory")
        # Find all YAML files
        yaml_files = list(characters_path.glob("*.yaml")) + list(characters_path.glob("*.yml"))
        if not yaml_files:
            raise ValueError(f"No YAML files found in '{characters_dir}'")
        for yaml_file in yaml_files:
            try:
                with open(yaml_file, 'r', encoding='utf-8') as file:
                    data = yaml.safe_load(file)
                # Robustness fix: safe_load returns None for empty files (and a
                # scalar for non-mapping documents); `field not in data` would
                # then raise TypeError instead of skipping the file.
                if not isinstance(data, dict):
                    print(f"Warning: File '{yaml_file.name}' does not contain a mapping, skipping")
                    continue
                required_fields = ['observations', 'reflections', 'plans']
                missing_fields = [field for field in required_fields if field not in data]
                if missing_fields:
                    print(f"Warning: File '{yaml_file.name}' missing fields: {missing_fields}")
                    continue
                # Create template; display name is derived from the file stem.
                character_templates.append(CharacterTemplate(
                    name=str(yaml_file.stem).replace('_', ' ').title(),
                    observations=data.get('observations', []),
                    reflections=data.get('reflections', []),
                    plans=data.get('plans', []),
                    yaml_file=yaml_file.name
                ))
            except Exception as e:
                # Best-effort loading: a broken file should not abort the rest.
                print(f"Error loading '{yaml_file.name}': {e}")
                continue
        return character_templates

    def get_selected_template(self) -> Optional[CharacterTemplate]:
        """Get the currently selected template"""
        return self._selected_template
class CreationStep(Enum):
    """Pipeline stages for building a character, in the order they run."""

    STARTING = "starting"          # initial state before any work begins
    OBSERVATIONS = "observations"  # inject basic memories and experiences
    REFLECTIONS = "reflections"    # derive higher-level insights
    PLANS = "plans"                # install goals and future intentions
    OVERVIEW = "overview"          # AI-extracted character profile
    COMPLETE = "complete"          # all steps finished
class CharacterCreationDialog(AsyncElement):
    """Dialog that animates the (currently mocked) character-creation pipeline.

    Creation starts automatically as a background asyncio task when the
    dialog is built; the refreshable view updates as each CreationStep
    completes, and switches to a completion screen once the character exists.
    """

    # Template the character is built from.
    _template: CharacterTemplate
    # Finished character; None until creation completes.
    _character: Optional[Character]
    # NOTE(review): declared but never assigned in this class — the real agent
    # wiring is still commented out in _create_character_internal; confirm.
    _character_agent: CharacterAgent
    # Step currently being processed (drives spinner/badge states).
    _current_step: CreationStep
    # Steps that have already finished.
    _steps_completed: set[CreationStep]
    # Background task running _create_character_internal.
    _creation_task: Optional[asyncio.Task]

    async def build(self, template: CharacterTemplate, *args, **kwargs) -> None:
        """Render the progress card and kick off the creation task.

        Args:
            template: Character template to build the character from.
        """
        self._template = template
        print(template)  # NOTE(review): leftover debug output
        self._character = None
        self._current_step = CreationStep.STARTING
        self._steps_completed = set()
        self._creation_task = None
        with self.element:
            with ui.card().classes('w-96 mx-auto shadow-lg'):
                await self.character_creation_view()
        # Start creation automatically
        self._creation_task = asyncio.create_task(self._create_character_internal())

    @ui.refreshable
    async def character_creation_view(self):
        """Switch between the progress view and the completion view."""
        if self._character:
            await self._completion_view()
            return
        await self._progress_view()

    async def _progress_view(self):
        """Render the header, overall progress bar and per-step status list."""
        # Header
        with ui.row().classes('w-full items-center mb-6 p-4 bg-gradient-to-r from-blue-50 to-purple-50 rounded-lg'):
            ui.icon('psychology', size='2rem').classes('text-blue-600')
            with ui.column().classes('flex-1 ml-3'):
                ui.label(f'Creating {self._template["name"]}').classes('text-xl font-bold text-gray-800')
                ui.label('Building character memories and personality...').classes('text-sm text-gray-600')
        # Progress bar (STARTING/COMPLETE are excluded from the step count)
        steps = [CreationStep.OBSERVATIONS, CreationStep.REFLECTIONS, CreationStep.PLANS, CreationStep.OVERVIEW]
        progress_value = (len(self._steps_completed) / len(steps)) * 100
        ui.linear_progress(value=progress_value / 100).classes('mb-6')
        ui.label(f'{len(self._steps_completed)}/{len(steps)} steps complete').classes(
            'text-center text-sm text-gray-600 mb-4')
        # Steps list: (step, icon, title, description) per row
        steps_info = [
            (CreationStep.OBSERVATIONS, 'visibility', 'Loading Observations',
             'Injecting basic memories and experiences'),
            (CreationStep.REFLECTIONS, 'lightbulb', 'Processing Reflections',
             'Generating deeper insights and understanding'),
            (CreationStep.PLANS, 'track_changes', 'Installing Plans', 'Setting goals and future intentions'),
            (CreationStep.OVERVIEW, 'summarize', 'Extracting Profile', 'Creating character overview with AI'),
        ]
        with ui.list().classes('w-full space-y-2'):
            for step, icon, title, description in steps_info:
                await self._render_step_item(step, icon, title, description)

    async def _render_step_item(self, step: CreationStep, icon: str, title: str, description: str):
        """Render one step row: icon/spinner, labels and a status badge."""
        # Status logic: completed set wins over the currently-running step.
        if step in self._steps_completed:
            status = 'complete'
            icon_color = 'text-green-600'
            bg_color = 'bg-green-50 border-green-200'
        elif step == self._current_step:
            status = 'current'
            icon_color = 'text-blue-600'
            bg_color = 'bg-blue-50 border-blue-200'
        else:
            status = 'pending'
            icon_color = 'text-gray-400'
            bg_color = 'bg-gray-50 border-gray-200'
        with ui.item().classes(f'rounded-lg border-2 {bg_color} transition-all duration-300'):
            with ui.row().classes('w-full items-center p-3'):
                # Icon/Spinner
                if status == 'complete':
                    ui.icon('check_circle').classes(f'{icon_color} text-2xl')
                elif status == 'current':
                    ui.spinner(size='lg').classes('text-blue-600')
                else:
                    ui.icon(icon).classes(f'{icon_color} text-2xl')
                # Text
                with ui.column().classes('flex-1 ml-4'):
                    ui.label(title).classes(
                        f'font-semibold {"text-green-800" if status == "complete" else "text-blue-800" if status == "current" else "text-gray-600"}')
                    ui.label(description).classes('text-sm text-gray-600 mt-1')
                # Badge
                if status == 'complete':
                    ui.badge('Done', color='positive').classes('ml-auto')
                elif status == 'current':
                    ui.badge('Processing...', color='info').classes('ml-auto animate-pulse')

    async def _completion_view(self):
        """Render the success screen with a preview of the created character."""
        with ui.column().classes('w-full items-center p-6'):
            # Success icon
            with ui.row().classes('items-center justify-center mb-6 p-4 bg-green-50 rounded-full w-20 h-20 mx-auto'):
                ui.icon('celebration', size='3rem').classes('text-green-600')
            ui.label(f'{self._character.name} is ready!').classes('text-2xl font-bold text-green-800 text-center mb-2')
            ui.label('Character creation completed successfully').classes('text-gray-600 text-center mb-6')
            # Character preview
            with ui.card().classes('w-full bg-gradient-to-br from-green-50 to-blue-50 border border-green-200'):
                with ui.card_section():
                    with ui.row().classes('items-start gap-4'):
                        with ui.avatar().classes('bg-gradient-to-br from-blue-400 to-purple-600 text-white text-xl'):
                            ui.label(self._character.name[0].upper())
                        with ui.column().classes('flex-1'):
                            ui.label(f'{self._character.name}, {self._character.age}').classes('font-bold text-lg')
                            ui.label(self._character.occupation).classes('text-blue-600 font-medium')
                            ui.label(self._character.personality).classes('text-gray-700 text-sm mt-1')
            # Done button — NOTE(review): no on_click handler wired yet.
            with ui.row().classes('gap-3 mt-6'):
                ui.button('Start Chatting', icon='chat').classes(
                    'bg-gradient-to-r from-blue-500 to-purple-600 text-white')

    async def _create_character_internal(self):
        """Mock character creation with realistic timing"""
        try:
            # self._character = Character(name=self._template['name'])
            # self._character_agent = CharacterAgent()
            # Step 1: Observations
            self._current_step = CreationStep.OBSERVATIONS
            self.character_creation_view.refresh()
            await asyncio.sleep(2.0)  # Simulate loading observations
            self._steps_completed.add(CreationStep.OBSERVATIONS)
            self.character_creation_view.refresh()
            await asyncio.sleep(0.3)
            # Step 2: Reflections
            self._current_step = CreationStep.REFLECTIONS
            self.character_creation_view.refresh()
            await asyncio.sleep(2.8)  # Reflections take longer
            self._steps_completed.add(CreationStep.REFLECTIONS)
            self.character_creation_view.refresh()
            await asyncio.sleep(0.3)
            # Step 3: Plans
            self._current_step = CreationStep.PLANS
            self.character_creation_view.refresh()
            await asyncio.sleep(1.5)
            self._steps_completed.add(CreationStep.PLANS)
            self.character_creation_view.refresh()
            await asyncio.sleep(0.3)
            # Step 4: AI Overview Extraction
            self._current_step = CreationStep.OVERVIEW
            self.character_creation_view.refresh()
            await asyncio.sleep(3.2)  # AI takes longest
            # Mock character creation
            self._character = self._create_mock_character()
            self._steps_completed.add(CreationStep.OVERVIEW)
            self._current_step = CreationStep.COMPLETE
            self.character_creation_view.refresh()
        except Exception as e:
            # Task runs in the background, so errors are surfaced as a toast.
            ui.notify(f'Error creating character: {str(e)}', color='negative')

    def _create_mock_character(self) -> Character:
        """Create mock character based on template"""
        mock_personalities = [
            "Shy and thoughtful with hidden depths",
            "Confident and outgoing tech enthusiast",
            "Creative and energetic with artistic flair"
        ]
        return Character(
            name=self._template["name"],
            age=random.randint(22, 35),
            personality=random.choice(mock_personalities),
            occupation=f"{self._template['name']}'s profession",
            location="Coffee shop"
        )

    def get_created_character(self) -> Optional[Character]:
        """Return the created character, or None while creation is still running."""
        return self._character

View File

@@ -1,129 +1,111 @@
import os
from typing import Optional
from nicegui import ui
from typing import Optional, List, Dict
from nicegui import ui, binding
from components import AsyncElement
from llm_connector import LLMBackend
from living_agents import Character
from .character import CharacterCard
from .scene_control import SceneControl
from .conversation_history import ConversationHistory
from .create_character import CharacterTemplateImportDialog, CharacterCreationDialog
from living_agents import RoleplaySystem, Character, LLMAgent
from utils import load_characters
import logging
logger = logging.getLogger(__name__)
class MainPage(AsyncElement):
backend: LLMBackend
scene_manager = None # Will hold SceneManager instance
selected_character: Optional[str] = None
memory_viewer = None
system: RoleplaySystem
selected_character: Optional[Character] = None
chat_container = None
scene_info_container = None
characters: List[Character]
initial_memories: Dict[Character, List[str]]
memory_filter: ui.select
is_generating = binding.BindableProperty()
async def setup_characters(self):
self.is_generating = True
await self.system.setup_characters(self.characters)
for char, memories in self.initial_memories.items():
for memory in memories:
await self.system.agents[char].perceive(memory)
self.is_generating = False
async def build(self): # pylint: disable=W0221
backend: LLMBackend = {'base_url': os.environ['BACKEND_BASE_URL'],
'api_token': os.environ['BACKEND_API_TOKEN'],
'model': os.environ['BACKEND_MODEL']}
loaded_characters = load_characters()
self.backend = backend
# Initialize mock scene manager (will be replaced with real one)
await self._initialize_scene()
self.characters = [char for char in loaded_characters]
self.initial_memories = {char: memories for char, memories in loaded_characters.items()}
self.system = RoleplaySystem()
# Header
with ui.header().classes('bg-gradient-to-r from-purple-600 to-indigo-600 text-white'):
ui.label('🎭 Living Agents').classes('text-2xl font-bold')
ui.label('Multi-Agent Roleplay with Stanford Memory Architecture').classes(
'text-sm opacity-90')
with self.element:
with ui.row().classes(
'bg-gradient-to-r from-purple-600 to-indigo-600 text-white q-pa-sm items-center w-full'):
ui.label('🎭 Living Agents').classes('text-2xl font-bold')
ui.label('Multi-Agent Roleplay with Stanford Memory Architecture').classes('text-sm opacity-90')
self.classes('w-full')
with self:
# Top Row: Characters
with ui.row().classes('w-full gap-2 items-center'):
for char in self.characters:
(await CharacterCard.create(ui.card, char)) \
.classes('p-2 cursor-pointer hover:bg-gray-50 gap-1') \
.on('click', lambda c=char: self._select_character(c))
ui.space()
ui.button(icon='add', on_click=self._open_character_creation).props('fab')
# Main container with three columns
with ui.row().classes('w-full p-4 gap-4'):
# Selected Character
with ui.row().classes('w-full flex-1'):
# Character card
with ui.card().classes('h-full max-w-96'):
await self._character_view() # type: ignore
with ui.row().classes('w-full items-center mb-2'):
ui.label('🧠 Memories').classes('text-lg font-bold')
ui.space()
# Memory type filter
self.memory_filter = ui.select(['all', 'observation', 'reflection', 'plan'], value='all',
on_change=self._memory_view.refresh).props(
'dense outlined').classes('w-24')
await self._memory_view() # type: ignore
# Conversation History (takes full height)
with ui.card().classes('flex-1 h-full'):
ui.label('🗨️ Conversation History').classes('text-lg font-bold')
# Left Panel - Scene Control & Characters
with ui.column().classes('w-1/4 gap-4'):
# Scrollable chat container - takes remaining height
with ui.scroll_area().classes('w-full').style('height: calc(100% - 3rem)'):
self.chat_container = ui.column().classes('w-full gap-2')
with self.chat_container:
# Welcome message
with ui.chat_message(name='System', sent=False).classes('w-full'):
ui.label(
'Welcome to the Living Agents roleplay system! ''Select a character and start interacting.').classes(
'text-sm')
# Scene Information Card
# ui.timer(0.5, self.setup_characters, once=True)
# Main container as a column
with ui.row().classes('w-full'):
with ui.column().classes('flex-1 bg-red-200 h-full'):
# Interactions Card
with ui.card().classes('w-full'):
ui.label('📍 Scene Control').classes('text-lg font-bold mb-2')
self.scene_info_container = ui.column().classes('w-full gap-2')
with self.scene_info_container:
self._create_scene_info()
ui.separator()
# Time controls
with ui.row().classes('w-full gap-2 mt-2'):
ui.button('⏰ +1 Hour', on_click=lambda: self._advance_time(1)).classes('flex-1')
ui.button('📅 +1 Day', on_click=lambda: self._advance_time(24)).classes('flex-1')
# Characters List
with ui.card().classes('w-full'):
ui.label('👥 Characters').classes('text-lg font-bold mb-2')
# Character cards
with ui.column().classes('w-full gap-2'):
# Alice
with ui.card().classes('w-full p-3 cursor-pointer hover:bg-gray-50').on(
'click', lambda: self._select_character('Alice')):
with ui.row().classes('items-center gap-2'):
ui.icon('person', size='sm').classes('text-purple-500')
with ui.column().classes('flex-1'):
ui.label('Alice').classes('font-semibold')
ui.label('Literature Student, 23').classes(
'text-xs text-gray-500')
with ui.row().classes('gap-1 mt-1'):
ui.badge('📚 10 memories', color='purple').classes('text-xs')
ui.badge('💭 0 reflections', color='indigo').classes('text-xs')
# Bob
with ui.card().classes('w-full p-3 cursor-pointer hover:bg-gray-50').on(
'click', lambda: self._select_character('Bob')):
with ui.row().classes('items-center gap-2'):
ui.icon('person', size='sm').classes('text-blue-500')
with ui.column().classes('flex-1'):
ui.label('Bob').classes('font-semibold')
ui.label('Software Developer, 28').classes(
'text-xs text-gray-500')
with ui.row().classes('gap-1 mt-1'):
ui.badge('📚 8 memories', color='purple').classes('text-xs')
ui.badge('💭 0 reflections', color='indigo').classes('text-xs')
# Emma
with ui.card().classes('w-full p-3 cursor-pointer hover:bg-gray-50').on(
'click', lambda: self._select_character('Emma')):
with ui.row().classes('items-center gap-2'):
ui.icon('person', size='sm').classes('text-pink-500')
with ui.column().classes('flex-1'):
ui.label('Emma').classes('font-semibold')
ui.label('Barista & Artist, 25').classes(
'text-xs text-gray-500')
with ui.row().classes('gap-1 mt-1'):
ui.badge('📚 7 memories', color='purple').classes('text-xs')
ui.badge('💭 0 reflections', color='indigo').classes('text-xs')
# Character Summary - moved here to be under Characters
with ui.card().classes('w-full'):
ui.label('📝 Character Summary').classes('text-lg font-bold mb-2')
self.character_summary = ui.column().classes('w-full')
with self.character_summary:
ui.label('Select a character to see their summary').classes(
'text-sm text-gray-500 italic')
# Middle Panel - Interaction & Chat
with ui.column().classes('w-1/2 gap-4'):
# Interaction Controls
with ui.card().classes('w-full'):
ui.label('💬 Interactions').classes('text-lg font-bold mb-2')
ui.label('💬 Interactions').classes('text-lg font-bold mb-1')
# Character-to-User interaction
with ui.column().classes('w-full gap-2'):
with ui.column().classes('w-full gap-1'):
ui.label('Talk to Character').classes('font-semibold text-sm')
with ui.row().classes('w-full gap-2'):
with ui.row().classes('w-full gap-1'):
self.user_input = ui.input(
placeholder='Say something to the selected character...'
).classes('flex-1')
placeholder='Say something to the selected character...').classes('flex-1').props(
'dense')
ui.button('Send', on_click=self._send_to_character).props(
'icon=send color=primary')
'icon=send color=primary').bind_enabled_from(self, 'is_generating',
backward=lambda v: not v)
ui.separator()
@@ -131,153 +113,101 @@ class MainPage(AsyncElement):
with ui.column().classes('w-full gap-2 mt-2'):
ui.label('Character Interaction').classes('font-semibold text-sm')
with ui.row().classes('w-full gap-2'):
self.char1_select = ui.select(
['Alice', 'Bob', 'Emma'],
label='Character 1',
value='Alice'
).classes('flex-1')
self.char2_select = ui.select(
['Alice', 'Bob', 'Emma'],
label='Character 2',
value='Bob'
).classes('flex-1')
self.interaction_context = ui.input(
placeholder='Context for interaction...'
).classes('w-full')
ui.button(
'Make them interact',
on_click=self._character_interaction
).props('icon=forum color=secondary').classes('w-full')
self.char1_select = ui.select(['Alice', 'Bob', 'Emma'], label='Character 1',
value='Alice').classes('flex-1').props('dense')
self.char2_select = ui.select(['Alice', 'Bob', 'Emma'], label='Character 2',
value='Bob').classes('flex-1').props('dense')
self.interaction_context = ui.input(placeholder='Context for interaction...').classes(
'w-full').props('dense')
ui.button('Make them interact', on_click=self._character_interaction).props(
'icon=forum color=secondary').classes('w-full').bind_enabled_from(self, 'is_generating',
backward=lambda
v: not v)
# Bottom Row: Scene Control, Interaction
with ui.card().classes('h-full'):
ui.label('📍 Scene Control').classes('text-lg font-bold mb-1')
# Chat History
with ui.card().classes('w-full flex-1'):
with ui.row().classes('w-full items-center mb-2'):
ui.label('🗨️ Conversation History').classes('text-lg font-bold')
ui.space()
ui.button(icon='delete', on_click=self._clear_chat).props('flat round size=sm')
with ui.row().classes('w-full'):
with ui.column().classes('gap-2'):
with ui.row().classes('justify-between'):
ui.label('Location:').classes('text-sm font-semibold')
ui.label('Cozy Coffee Shop').classes('text-sm')
with ui.row().classes('justify-between'):
ui.label('Time:').classes('text-sm font-semibold')
ui.label('2:30 PM').classes('text-sm')
with ui.row().classes('justify-between'):
ui.label('Atmosphere:').classes('text-sm font-semibold')
ui.label('Quiet and peaceful').classes('text-sm')
ui.space()
with ui.column().classes('gap-2'):
ui.button('⏰ +1 Hour').classes('flex-1').bind_enabled_from(self, 'is_generating',
backward=lambda v: not v)
ui.button('📅 +1 Day').classes('flex-1').bind_enabled_from(self, 'is_generating',
backward=lambda v: not v)
# Scrollable chat container
with ui.scroll_area().classes('w-full h-96 border rounded p-2'):
self.chat_container = ui.column().classes('w-full gap-2')
with self.chat_container:
# Welcome message
with ui.chat_message(name='System', sent=False).classes('w-full'):
ui.label(
'Welcome to the Living Agents roleplay system! '
'Select a character and start interacting.'
).classes('text-sm')
# move outside of page generation
# ui.timer(0.5, self.setup_characters, once=True)
# Right Panel - Memory Stream
with ui.column().classes('w-1/4 gap-4'):
async def _open_character_creation(self):
# with ui.dialog() as dialog, ui.card():
# (await CharacterCreationDialog.create(ui.column)).classes('w-full')
dialog = (await CharacterTemplateImportDialog.create(ui.dialog)).classes('w-full')
result = await dialog
if result:
dialog = (await CharacterCreationDialog.create(ui.dialog, template=result)).classes('w-full').props(
'persistent')
result = await dialog
print(result)
# Memory Stream Viewer
with ui.card().classes('w-full flex-1'):
with ui.row().classes('w-full items-center mb-2'):
ui.label('🧠 Memory Stream').classes('text-lg font-bold')
ui.space()
# Memory type filter
self.memory_filter = ui.select(
['all', 'observation', 'reflection', 'plan'],
value='all',
on_change=self._update_memory_view
).props('dense outlined').classes('w-24')
# Scrollable memory list
with ui.scroll_area().classes('w-full h-96 border rounded p-2'):
self.memory_viewer = ui.column().classes('w-full gap-2')
with self.memory_viewer:
ui.label('Select a character to view memories').classes('text-sm text-gray-500 italic')
# Footer with stats
with ui.footer().classes('bg-gray-100 text-gray-600 text-sm'):
with ui.row().classes('w-full justify-center items-center gap-4'):
ui.label('🎯 Stanford Memory Architecture')
ui.label('|')
self.stats_label = ui.label('Total Memories: 0 | Reflections: 0')
ui.label('|')
ui.label('⚡ Powered by Custom LLM Connector')
def _create_scene_info(self):
"""Create scene information display"""
with ui.row().classes('w-full justify-between'):
ui.label('Location:').classes('text-sm font-semibold')
ui.label('Cozy Coffee Shop').classes('text-sm')
with ui.row().classes('w-full justify-between'):
ui.label('Time:').classes('text-sm font-semibold')
ui.label('2:30 PM').classes('text-sm')
with ui.row().classes('w-full justify-between'):
ui.label('Atmosphere:').classes('text-sm font-semibold')
ui.label('Quiet and peaceful').classes('text-sm')
async def _initialize_scene(self):
"""Initialize the scene with mock data (will be replaced with real SceneManager)"""
# This will be replaced with actual SceneManager initialization
ui.notify('🎬 Scene initialized with 3 characters', type='positive')
async def _select_character(self, character_name: str):
async def _select_character(self, character: Character):
"""Select a character and update UI"""
self.selected_character = character_name
ui.notify(f'Selected: {character_name}', type='info')
self.selected_character = character
self._character_view.refresh() # type: ignore
# Update character summary
self.character_summary.clear()
with self.character_summary:
ui.label(f'{character_name}').classes('font-bold text-lg')
self._memory_view.refresh()
@ui.refreshable
async def _character_view(self):
with ui.column().classes('w-full gap-0'):
if self.selected_character is None:
ui.label('Select a character to see their summary and Memories').classes(
'text-sm text-gray-500 italic')
return
ui.label(f'{self.selected_character.name}').classes('font-bold text-lg')
ui.separator()
with ui.column().classes('gap-0'):
ui.label(f'Age: {self.selected_character.age}').classes('text-sm')
ui.label(f'Occupation: {self.selected_character.occupation}').classes('text-sm')
ui.label(f'Personality: {self.selected_character.personality}').classes('text-sm')
ui.separator()
ui.label('Goals').classes('font-bold text-lg')
with ui.list().props('dense separator'):
for goal in self.selected_character.goals:
ui.item(goal)
if character_name == 'Alice':
ui.label('Age: 23').classes('text-sm')
ui.label('Occupation: Graduate student').classes('text-sm')
ui.label('Personality: Introverted, observant, loves mystery novels').classes('text-sm mt-2')
ui.label('Current Goal: Finish thesis chapter').classes('text-sm text-blue-600 mt-2')
elif character_name == 'Bob':
ui.label('Age: 28').classes('text-sm')
ui.label('Occupation: Senior Developer').classes('text-sm')
ui.label('Personality: Confident, helpful, technical').classes('text-sm mt-2')
ui.label('Current Goal: Launch new feature').classes('text-sm text-blue-600 mt-2')
elif character_name == 'Emma':
ui.label('Age: 25').classes('text-sm')
ui.label('Occupation: Barista & Art Student').classes('text-sm')
ui.label('Personality: Energetic, social, creative').classes('text-sm mt-2')
ui.label('Current Goal: Organize art show').classes('text-sm text-blue-600 mt-2')
# Update memory viewer
await self._update_memory_view()
async def _update_memory_view(self):
@ui.refreshable
async def _memory_view(self):
"""Update the memory stream viewer"""
if not self.selected_character:
return
self.memory_viewer.clear()
with self.memory_viewer:
# Mock memories for demonstration
memories = [
('observation', 'Arrived at the coffee shop', 8, '10:00 AM'),
('observation', 'Ordered my usual latte', 3, '10:05 AM'),
('observation', 'Saw a familiar face by the window', 6, '10:30 AM'),
('reflection', 'I seem to come here when I need to focus', 7, '11:00 AM'),
('plan', 'Work on thesis for 2 hours', 5, '11:30 AM'),
]
filter_type = self.memory_filter.value
for mem_type, description, importance, time in memories:
if filter_type == 'all' or filter_type == mem_type:
with ui.card().classes('w-full p-2'):
with ui.row().classes('w-full items-start gap-2'):
# Memory type icon
if mem_type == 'observation':
ui.icon('visibility', size='xs').classes('text-blue-500 mt-1')
elif mem_type == 'reflection':
ui.icon('psychology', size='xs').classes('text-purple-500 mt-1')
else:
ui.icon('event', size='xs').classes('text-green-500 mt-1')
with ui.column().classes('flex-1'):
ui.label(description).classes('text-sm')
with ui.row().classes('gap-2 mt-1'):
ui.badge(f'{importance}', color='orange').classes('text-xs')
ui.label(time).classes('text-xs text-gray-500')
if self.selected_character:
with ui.scroll_area().classes('w-full border rounded p-0 flex-1'):
with ui.column().classes('gap-2'):
memories = self.system.get_character_memories(self.selected_character,
self.memory_filter.value if self.memory_filter.value else 'all')
for memory in memories:
with ui.card().classes('w-full p-1'):
with ui.row().classes('gap-2'):
with ui.column().classes('flex-1'):
ui.label(memory.description).classes('text-sm')
with ui.row().classes('gap-2 mt-1 items-center'):
if memory.memory_type == 'observation':
ui.icon('visibility', size='xs').classes('text-blue-500 mt-1')
elif memory.memory_type == 'reflection':
ui.icon('psychology', size='xs').classes('text-purple-500 mt-1')
else:
ui.icon('event', size='xs').classes('text-green-500 mt-1')
ui.badge(f'{memory.importance_score}', color='orange').classes('text-xs')
ui.label(memory.creation_time.strftime('%Y-%m-%d %H:%M:%S')).classes(
'text-xs text-gray-500')
async def _send_to_character(self):
"""Send message to selected character"""
@@ -298,7 +228,7 @@ class MainPage(AsyncElement):
# Mock response (will be replaced with actual agent response)
with self.chat_container:
with ui.chat_message(name=self.selected_character, sent=False).classes('w-full'):
with ui.chat_message(name=self.selected_character.name, sent=False).classes('w-full'):
spinner = ui.spinner('dots')
# Simulate thinking
@@ -314,11 +244,17 @@ class MainPage(AsyncElement):
parent = spinner.parent_slot.parent
with parent:
if self.selected_character == 'Alice':
ui.label("*nervously adjusts glasses* Oh, um, hello there. I was just working on my thesis chapter about Victorian gothic literature. The coffee here helps me concentrate.").classes('text-sm')
ui.label(
"*nervously adjusts glasses* Oh, um, hello there. I was just working on my thesis chapter about Victorian gothic literature. The coffee here helps me concentrate.").classes(
'text-sm')
elif self.selected_character == 'Bob':
ui.label("Hey! Yeah, I'm actually debugging some code right now. This new feature is giving me some trouble, but I think I'm close to solving it. How's your day going?").classes('text-sm')
ui.label(
"Hey! Yeah, I'm actually debugging some code right now. This new feature is giving me some trouble, but I think I'm close to solving it. How's your day going?").classes(
'text-sm')
else:
ui.label("Hi! Welcome to our little coffee shop! I just finished a new sketch during my break - been trying to capture the afternoon light through the windows. Can I get you anything?").classes('text-sm')
ui.label(
"Hi! Welcome to our little coffee shop! I just finished a new sketch during my break - been trying to capture the afternoon light through the windows. Can I get you anything?").classes(
'text-sm')
async def _character_interaction(self):
"""Make two characters interact"""
@@ -356,7 +292,8 @@ class MainPage(AsyncElement):
# Add time advancement to chat
with self.chat_container:
with ui.chat_message(name='System', sent=False).classes('w-full'):
ui.label(f'⏰ Time advanced by {hours} hour(s). Characters update their plans...').classes('text-sm italic text-gray-600')
ui.label(f'⏰ Time advanced by {hours} hour(s). Characters update their plans...').classes(
'text-sm italic text-gray-600')
def _clear_chat(self):
"""Clear chat history"""

12
pages/scene_control.py Normal file
View File

@@ -0,0 +1,12 @@
from nicegui import ui
from components import AsyncElement
from living_agents import Character
class SceneControl(AsyncElement):
    """Scene-control panel (empty placeholder for now)."""

    async def build(self) -> None:
        # Per the AsyncElement contract, build() populates self.element;
        # the original `self.classes(...)` / `with self:` would require the
        # component itself to be a NiceGUI element, which AsyncElement is not.
        self.element.classes('w-full')
        with self.element:
            pass

View File

@@ -6,4 +6,7 @@ readme = "README.md"
requires-python = ">=3.12"
dependencies = [
"nicegui>=2.23.3",
"openai>=1.102.0",
"scikit-learn>=1.7.1",
"tqdm>=4.67.1",
]

3
utils/__init__.py Normal file
View File

@@ -0,0 +1,3 @@
from .character_loader import load_characters
__all__ = ['load_characters']

77
utils/character_loader.py Normal file
View File

@@ -0,0 +1,77 @@
import yaml
from pathlib import Path
from typing import List, Dict
from living_agents import Character
# def load_characters(characters_dir: str = "characters") -> List[Tuple[Character, List[str]]]:
def load_characters(characters_dir: str = "characters") -> Dict[Character, List[str]]:
"""
Load all character YAML files from the specified directory.
Args:
characters_dir (str): Path to the directory containing character YAML files
Returns:
List[Character]: List of Character objects loaded from YAML files
Raises:
FileNotFoundError: If the characters directory doesn't exist
yaml.YAMLError: If there's an error parsing a YAML file
ValueError: If a YAML file is missing required fields
"""
characters = {}
characters_path = Path(characters_dir)
if not characters_path.exists():
raise FileNotFoundError(f"Characters directory '{characters_dir}' not found")
if not characters_path.is_dir():
raise ValueError(f"'{characters_dir}' is not a directory")
# Find all YAML files in the directory
yaml_files = list(characters_path.glob("*.yaml")) + list(characters_path.glob("*.yml"))
if not yaml_files:
print(f"No YAML files found in '{characters_dir}'")
return characters
for yaml_file in yaml_files:
try:
with open(yaml_file, 'r', encoding='utf-8') as file:
data = yaml.safe_load(file)
if data is None:
print(f"Warning: Empty YAML file '{yaml_file.name}', skipping")
continue
# Validate required fields
required_fields = ['name', 'age', 'personality', 'occupation', 'location']
missing_fields = [field for field in required_fields if field not in data]
if missing_fields:
raise ValueError(f"File '{yaml_file.name}' missing required fields: {missing_fields}")
# Create Character object
character = Character(
name=data['name'],
age=data['age'],
personality=data['personality'],
occupation=data['occupation'],
location=data['location'],
relationships=data.get('relationships', {}),
goals=data.get('goals', [])
)
initialize_memories = data.get('initialize_memories', [])
characters[character] = initialize_memories
# characters.append((character, initialize_memories))
print(f"Loaded character: {character.name} from {yaml_file.name}")
except yaml.YAMLError as e:
print(f"Error parsing YAML file '{yaml_file.name}': {e}")
except Exception as e:
print(f"Error loading character from '{yaml_file.name}': {e}")
return characters

1370
uv.lock generated

File diff suppressed because it is too large Load Diff