// prisma/schema.prisma
datasource db {
  provider = "postgresql"
  url      = env("DATABASE_URL")
}

# Apply database migrations
npm run prisma:migrate

// OpenAI GPT-4
await llmUtils.getTextFromLLM(prompt, "openai/gpt-4");
// Anthropic Claude
await llmUtils.getTextFromLLM(prompt, "anthropic/claude-3-sonnet");
// Google PaLM
await llmUtils.getTextFromLLM(prompt, "google/palm-2");async function withRetry(fn, maxRetries = 3) {
  let retries = 0;
  while (retries < maxRetries) {
    try {
      return await fn();
    } catch (error) {
      if (!error.message.includes("rate limit")) throw error;
      retries++;
      // Exponential backoff: wait 2^retries seconds before retrying
      await new Promise((r) => setTimeout(r, Math.pow(2, retries) * 1000));
    }
  }
  throw new Error("Max retries exceeded");
}
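The helper can wrap any of the LLM calls above; a minimal usage sketch (reusing the `llmUtils` instance and `prompt` from the earlier examples):

// Hypothetical usage: retry a rate-limited completion call
const reply = await withRetry(() =>
  llmUtils.getTextFromLLM(prompt, "openai/gpt-4")
);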
# Clone the repository
git clone <your-repo>
cd liz

# Install dependencies
pnpm install

# Database configuration (choose one)
DATABASE_URL="postgresql://user:password@localhost:5432/dbname"
# Or for SQLite:
DATABASE_URL="file:./prisma/dev.db"
# LLM API Keys
OPENAI_API_KEY="your-openai-api-key"
OPENROUTER_API_KEY="your-openrouter-api-key"
# Application URL (required for OpenRouter)
APP_URL="http://localhost:3000"# Initialize the database
npm run init-db

import { Character } from "../types";
import { BaseAgent } from "../agent";
const assistantCharacter: Character = {
  name: "Assistant",
  agentId: "assistant_1",
  system: "You are a helpful assistant.",
  bio: ["A knowledgeable AI assistant"],
  lore: ["Created to help users with various tasks"],
  messageExamples: [
    [
      { user: "user1", content: { text: "Hello!" } },
      { user: "Assistant", content: { text: "Hi! How can I help?" } },
    ],
  ],
  postExamples: [],
  topics: ["general help", "task assistance"],
  style: {
    all: ["helpful", "friendly"],
    chat: ["conversational"],
    post: ["clear", "concise"],
  },
  adjectives: ["helpful", "knowledgeable"],
  routes: [],
};

export const assistant = new BaseAgent(assistantCharacter);

import express from "express";
import { AgentFramework } from "./framework";
import { standardMiddleware } from "./middleware";
import { assistant } from "./agents/assistant";
import { InputSource, InputType } from "./types";
const app = express();
app.use(express.json());
const framework = new AgentFramework();
standardMiddleware.forEach((middleware) => framework.use(middleware));
app.post("/agent/input", (req, res) => {
const input = {
source: InputSource.NETWORK,
userId: req.body.userId,
agentId: assistant.getAgentId(),
roomId: `room_${req.body.userId}`,
type: InputType.TEXT,
text: req.body.text,
};
framework.process(input, assistant, res);
});
app.listen(3000, () => {
console.log("Server running on http://localhost:3000");
});# Start the development server
npm run dev

curl -X POST http://localhost:3000/agent/input \
-H "Content-Type: application/json" \
-d '{
"userId": "test_user",
"text": "Hello, assistant!"
}'import { Character } from "../types";
import { BaseAgent } from "../agent";
const businessAdvisor: Character = {
  name: "Stern",
  agentId: "stern_advisor",
  system:
    "You are Stern, a no-nonsense business advisor known for direct, practical advice.",
  bio: [
    "Stern is a direct and efficient business consultant with decades of experience.",
    "Started as a factory floor manager before rising to consultant status.",
  ],
  lore: [
    "Known for turning around failing businesses with practical solutions",
    "Developed a reputation for honest, sometimes brutal feedback",
  ],
  messageExamples: [
    [
      { user: "client", content: { text: "How can I improve my business?" } },
      {
        user: "Stern",
        content: { text: "Specifics. What are your current metrics?" },
      },
    ],
  ],
  postExamples: [
    "Here's a 5-step plan to optimize your operations...",
    "Three critical mistakes most startups make:",
  ],
  topics: ["business", "strategy", "efficiency", "management"],
  style: {
    all: ["direct", "professional", "analytical"],
    chat: ["focused", "solution-oriented"],
    post: ["structured", "actionable"],
  },
  adjectives: ["efficient", "practical", "experienced"],
  routes: [],
};

export const stern = new BaseAgent(businessAdvisor);

// Basic conversation route
stern.addRoute({
  name: "conversation",
  description: "Handle natural conversation about business topics",
  handler: async (context, req, res) => {
    const response = await llmUtils.getTextFromLLM(
      context,
      "anthropic/claude-3-sonnet"
    );
    await res.send(response);
  },
});

// Specialized business analysis route
stern.addRoute({
  name: "analyze_metrics",
  description: "Analyze business metrics and provide recommendations",
  handler: async (context, req, res) => {
    const analysis = await llmUtils.getObjectFromLLM(
      context,
      analysisSchema,
      LLMSize.LARGE
    );
    await res.send(analysis);
  },
});

// Get the agent's system prompt
const systemPrompt = agent.getSystemPrompt();

// Example system prompt structure (illustrative)
const exampleSystemPrompt = `You are ${character.name}, ${character.system}
Key Characteristics:
${character.adjectives.join(", ")}
Style Guidelines:
- All interactions: ${character.style.all.join(", ")}
- Chat responses: ${character.style.chat.join(", ")}
- Public posts: ${character.style.post.join(", ")}
Areas of Focus:
${character.topics.join(", ")}`;

// Get the full agent context
const context = agent.getAgentContext();
// Context structure
<SYSTEM_PROMPT>
[System prompt as shown above]
</SYSTEM_PROMPT>
<BIO_CONTEXT>
[Random selection from bio array]
</BIO_CONTEXT>
<LORE_CONTEXT>
[Random selection from lore array]
</LORE_CONTEXT>
<MESSAGE_EXAMPLES>
[Selected conversation examples]
</MESSAGE_EXAMPLES>
<POST_EXAMPLES>
[Selected post examples]
</POST_EXAMPLES>
<STYLE_GUIDELINES>
[Style preferences for different interaction types]
</STYLE_GUIDELINES>

// Initialize LLMUtils
import { LLMUtils } from "../utils/llm";
const llmUtils = new LLMUtils();
// Environment variables needed:
//   OPENAI_API_KEY="your-openai-api-key"
//   OPENROUTER_API_KEY="your-openrouter-api-key"
//   APP_URL="http://localhost:3000"  (required for OpenRouter)

// Basic text generation
const response = await llmUtils.getTextFromLLM(
  prompt,
  "anthropic/claude-3-sonnet"
);

// Streaming responses
await llmUtils.getTextFromLLMStream(
  prompt,
  "anthropic/claude-3-sonnet",
  (token) => {
    // Handle each token as it arrives
    console.log(token);
  }
);
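If the complete reply is also needed, tokens can be accumulated inside the callback; a small sketch using the same call:

// Accumulate streamed tokens into a full reply while rendering them live
let fullReply = "";
await llmUtils.getTextFromLLMStream(
  prompt,
  "anthropic/claude-3-sonnet",
  (token) => {
    fullReply += token;
    process.stdout.write(token); // Incremental terminal output
  }
);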
import { LLMSize } from "../types";
// Define your schema
const analysisSchema = z.object({
sentiment: z.string(),
topics: z.array(z.string()),
confidence: z.number(),
summary: z.string(),
});
// Get structured response
const analysis = await llmUtils.getObjectFromLLM(
prompt,
analysisSchema,
LLMSize.LARGE
);
// Type-safe access to fields
console.log(analysis.sentiment);
console.log(analysis.topics);// Get boolean response
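Because the schema is plain zod, its static type can be derived with `z.infer` (a standard zod feature; the helper function below is hypothetical):

type Analysis = z.infer<typeof analysisSchema>;

// Hypothetical consumer with compile-time-checked fields
function summarizeAnalysis(a: Analysis): string {
  return `${a.sentiment} (${Math.round(a.confidence * 100)}%): ${a.summary}`;
}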
// Get boolean response
const shouldRespond = await llmUtils.getBooleanFromLLM(
  "Should the agent respond to this message?",
  LLMSize.SMALL
);

if (shouldRespond) {
  // Handle response
}

// Get image descriptions
const description = await llmUtils.getImageDescriptions(imageUrls);
// Analyze images with text context
const response = await llmUtils.getTextWithImageFromLLM(
  prompt,
  imageUrls,
  "anthropic/claude-3-sonnet"
);

// Get structured output from images
const analysis = await llmUtils.getObjectFromLLMWithImages(
  prompt,
  analysisSchema,
  imageUrls,
  LLMSize.LARGE
);
import { AgentFramework } from "./framework";
import { standardMiddleware } from "./middleware";
const framework = new AgentFramework();
// Add middleware
standardMiddleware.forEach((middleware) => framework.use(middleware));
// Process requests
framework.process(input, agent, res);

// Adding a route to an agent
agent.addRoute({
  name: "conversation",
  description: "Handle natural conversation",
  handler: async (context, req, res) => {
    const response = await llmUtils.getTextFromLLM(
      context,
      "anthropic/claude-3-sonnet"
    );
    await res.send(response);
  },
});

1. Client sends request to /agent/input
↓
2. validateInput checks required fields
↓
3. loadMemories fetches conversation history
↓
4. wrapContext builds prompt with memories
↓
5. createMemoryFromInput stores request
↓
6. router selects appropriate handler
↓
7. handler processes request with LLM
↓
8. Response sent back to client
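Each numbered stage is an `AgentMiddleware` with the `(req, res, next)` shape used by the middleware sources below. A minimal custom stage might look like this (a sketch; the `logInput` name and log format are hypothetical):

import { AgentMiddleware } from "../types";

// Hypothetical logging stage that could sit between steps 2 and 3
export const logInput: AgentMiddleware = async (req, res, next) => {
  console.log(`[input] ${req.input.userId} -> ${req.input.agentId}: ${req.input.text}`);
  await next(); // Pass control to the next stage in the chain
};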

// prisma/schema.prisma
datasource db {
  provider = "sqlite" // or "postgresql"
  url      = env("DATABASE_URL")
}

generator client {
  provider = "prisma-client-js"
}

model Memory {
  id        String   @id @default(uuid())
  userId    String
  agentId   String
  roomId    String
  content   String // Stores JSON as a string
  type      String
  generator String // "llm" or "external"
  createdAt DateTime @default(now())

  @@index([roomId])
  @@index([userId, agentId])
  @@index([type])
}

model Tweet {
  id             String   @id
  text           String
  userId         String
  username       String
  conversationId String?
  inReplyToId    String?
  createdAt      DateTime @default(now())
  permanentUrl   String?

  @@index([userId])
  @@index([conversationId])
}
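The `@@index([roomId])` line supports the framework's most common lookup: pulling a room's recent history. A sketch against the generated Prisma client (assuming the `prisma` export from ../utils/db used in the examples below; the room id is hypothetical):

// Hypothetical: fetch the 20 most recent memories for one room
const roomHistory = await prisma.memory.findMany({
  where: { roomId: "room_test_user" },
  orderBy: { createdAt: "desc" },
  take: 20,
});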
// src/middleware/load-memories.ts
export function createLoadMemoriesMiddleware(
  options: LoadMemoriesOptions = {}
): AgentMiddleware {
  const { limit = 100 } = options;
  return async (req, res, next) => {
    const memories = await prisma.memory.findMany({
      where: {
        userId: req.input.userId,
      },
      orderBy: {
        createdAt: "desc",
      },
      take: limit,
    });
    req.memories = memories.map((memory) => ({
      id: memory.id,
      userId: memory.userId,
      agentId: memory.agentId,
      roomId: memory.roomId,
      type: memory.type,
      createdAt: memory.createdAt,
      generator: memory.generator,
      content: JSON.parse(memory.content),
    }));
    await next();
  };
}
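The factory's `limit` option caps how much history each request loads; for example, a smaller window (a sketch, assuming the same `framework` instance used elsewhere):

// Hypothetical: load only the 25 most recent memories per request
framework.use(createLoadMemoriesMiddleware({ limit: 25 }));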
// src/middleware/create-memory.ts
export const createMemoryFromInput: AgentMiddleware = async (
  req,
  res,
  next
) => {
  await prisma.memory.create({
    data: {
      userId: req.input.userId,
      agentId: req.input.agentId,
      roomId: req.input.roomId,
      type: req.input.type,
      generator: "external",
      content: JSON.stringify(req.input),
    },
  });
  await next();
};

// Creating LLM response memories
await prisma.memory.create({
  data: {
    userId: req.input.userId,
    agentId: req.input.agentId,
    roomId: req.input.roomId,
    type: "agent",
    generator: "llm",
    content: JSON.stringify({ text: response }),
  },
});

// src/middleware/wrap-context.ts
function formatMemories(memories: Memory[]): string {
  return memories
    .reverse()
    .map((memory) => {
      const content = memory.content;
      if (memory.generator === "external") {
        return `[${memory.createdAt}] User ${memory.userId}: ${content.text}`;
      } else if (memory.generator === "llm") {
        return `[${memory.createdAt}] You: ${content.text}`;
      }
      return ""; // Memories from unknown generators contribute nothing
    })
    .join("\n\n");
}

// Final context structure
<PREVIOUS_CONVERSATION>
${memories}
</PREVIOUS_CONVERSATION>

<AGENT_CONTEXT>
${agentContext}
</AGENT_CONTEXT>

<CURRENT_USER_INPUT>
${currentInput}
</CURRENT_USER_INPUT>
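Putting the pieces together, the wrapping step plausibly concatenates the three tagged sections; a sketch only, since the real middleware's exact formatting may differ:

// Illustrative assembly of the final prompt context
const wrappedContext = [
  `<PREVIOUS_CONVERSATION>\n${formatMemories(req.memories)}\n</PREVIOUS_CONVERSATION>`,
  `<AGENT_CONTEXT>\n${agent.getAgentContext()}\n</AGENT_CONTEXT>`,
  `<CURRENT_USER_INPUT>\n${req.input.text}\n</CURRENT_USER_INPUT>`,
].join("\n\n");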
import express from "express";
import { AgentFramework } from "../framework";
import { standardMiddleware } from "../middleware";
import { Character, InputSource, InputType } from "../types";
import { BaseAgent } from "../agent";
import readline from "readline";
// Define your agent
const assistant: Character = {
  name: "Assistant",
  agentId: "cli_assistant",
  system: "You are a helpful CLI assistant.",
  bio: ["A command-line AI assistant"],
  lore: ["Created to help users through the terminal"],
  messageExamples: [
    [
      { user: "user1", content: { text: "Hello!" } },
      { user: "Assistant", content: { text: "Hi! How can I help?" } },
    ],
  ],
  postExamples: [],
  topics: ["general help", "cli", "terminal"],
  style: {
    all: ["helpful", "concise"],
    chat: ["friendly"],
    post: ["clear"],
  },
  adjectives: ["helpful", "efficient"],
  routes: [],
};
// Initialize framework
const app = express();
app.use(express.json());
const framework = new AgentFramework();
standardMiddleware.forEach((middleware) => framework.use(middleware));
// Create agent instance
const agent = new BaseAgent(assistant);
// Add conversation route
agent.addRoute({
  name: "conversation",
  description: "Handle natural conversation",
  handler: async (context, req, res) => {
    const response = await llmUtils.getTextFromLLM(
      context,
      "anthropic/claude-3-sonnet"
    );
    await res.send(response);
  },
});
// Set up CLI interface
async function startCLI() {
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
  });

  console.log("\nCLI Assistant");
  console.log("=============");

  async function prompt() {
    rl.question("\nYou: ", async (text) => {
      try {
        const response = await framework.process(
          {
            source: InputSource.NETWORK,
            userId: "cli_user",
            agentId: agent.getAgentId(),
            roomId: "cli_session",
            type: InputType.TEXT,
            text: text,
          },
          agent
        );
        console.log("\nAssistant:", response);
        prompt();
      } catch (error) {
        console.error("\nError:", error);
        prompt();
      }
    });
  }

  prompt();
}

// Start server and CLI
const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`Server running on http://localhost:${PORT}`);
  startCLI();
});

// src/example/twitter-bot.ts
import { TwitterClient } from "@liz/twitter-client";
import { Character } from "../types";
import { BaseAgent } from "../agent";
// Define Twitter bot character
const twitterBot: Character = {
  name: "TechNews",
  agentId: "tech_news_bot",
  system:
    "You are a tech news curator sharing insights about AI and technology.",
  bio: ["AI-powered tech news curator"],
  lore: ["Passionate about sharing tech insights"],
  messageExamples: [
    [
      { user: "user1", content: { text: "What's new in AI?" } },
      {
        user: "TechNews",
        content: { text: "Here are the latest developments..." },
      },
    ],
  ],
  postExamples: [
    "🚀 Breaking: New developments in quantum computing...",
    "💡 AI Insight of the day: Understanding large language models...",
  ],
  topics: ["AI", "technology", "programming", "tech news"],
  style: {
    all: ["informative", "engaging"],
    chat: ["helpful", "knowledgeable"],
    post: ["concise", "engaging"],
  },
  adjectives: ["tech-savvy", "insightful"],
  routes: [],
};
// Create agent
const agent = new BaseAgent(twitterBot);
// Add tweet generation route
agent.addRoute({
  name: "create_new_tweet",
  description: "Generate a new tweet about tech news",
  handler: async (context, req, res) => {
    const tweet = await llmUtils.getTextFromLLM(
      context,
      "anthropic/claude-3-sonnet"
    );
    await res.send(tweet);
  },
});
// Configure Twitter client
const config = {
  username: process.env.TWITTER_USERNAME,
  password: process.env.TWITTER_PASSWORD,
  email: process.env.TWITTER_EMAIL,
  twoFactorSecret: process.env.TWITTER_2FA_SECRET,
  retryLimit: 3,
  postIntervalHours: 4,
  pollingInterval: 5,
  dryRun: process.env.NODE_ENV !== "production",
};

// Start Twitter bot
async function startBot() {
  const twitter = new TwitterClient(agent, config);
  await twitter.start();
  console.log("Twitter bot started!");
}

startBot().catch(console.error);
import { AgentFramework } from "../framework";
import { standardMiddleware } from "../middleware";
import { Character, InputSource, InputType } from "../types";
import { BaseAgent } from "../agent";
import { prisma } from "../utils/db";
// Define memory-aware agent
const memoryAgent: Character = {
  name: "Mentor",
  agentId: "mentor_agent",
  system:
    "You are a mentor who remembers past conversations to provide personalized guidance.",
  bio: ["An AI mentor with perfect memory"],
  lore: ["Uses conversation history to give contextual advice"],
  messageExamples: [],
  postExamples: [],
  topics: ["mentoring", "personal growth"],
  style: {
    all: ["personalized", "thoughtful"],
    chat: ["empathetic"],
    post: ["reflective"],
  },
  adjectives: ["understanding", "wise"],
  routes: [],
};
const agent = new BaseAgent(memoryAgent);
// Add conversation route with memory context
agent.addRoute({
  name: "conversation",
  description: "Handle conversation with memory context",
  handler: async (context, req, res) => {
    // Get recent memories for this user
    const memories = await prisma.memory.findMany({
      where: {
        userId: req.input.userId,
        agentId: req.input.agentId,
      },
      orderBy: {
        createdAt: "desc",
      },
      take: 10,
    });

    // Format memories for context
    const memoryContext = memories
      .map((m) => {
        const content = JSON.parse(m.content);
        return `[${m.createdAt}] ${content.text}`;
      })
      .join("\n");

    // Add memory context to prompt
    const promptWithMemory = `
Previous interactions:
${memoryContext}

Current conversation:
${context}`;

    const response = await llmUtils.getTextFromLLM(
      promptWithMemory,
      "anthropic/claude-3-sonnet"
    );

    // Store response in memory
    await prisma.memory.create({
      data: {
        userId: req.input.userId,
        agentId: req.input.agentId,
        roomId: req.input.roomId,
        type: "response",
        generator: "llm",
        content: JSON.stringify({ text: response }),
      },
    });

    await res.send(response);
  },
});
// Initialize framework
const framework = new AgentFramework();
standardMiddleware.forEach((middleware) => framework.use(middleware));
// Example usage
async function chat(text: string) {
  return framework.process(
    {
      source: InputSource.NETWORK,
      userId: "example_user",
      agentId: agent.getAgentId(),
      roomId: "example_room",
      type: InputType.TEXT,
      text,
    },
    agent
  );
}
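Assuming `framework.process` resolves to the agent's reply, as in the CLI example, the helper can then be called directly:

// Hypothetical usage of the chat() helper
const reply = await chat("I keep procrastinating on my side project.");
console.log(reply);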
import { AgentMiddleware } from "../types";
import { LLMUtils } from "../utils/llm";
const sentimentSchema = z.object({
  sentiment: z.enum(["positive", "negative", "neutral"]),
  confidence: z.number(),
  explanation: z.string(),
});
export const analyzeSentiment: AgentMiddleware = async (req, res, next) => {
  const llmUtils = new LLMUtils();
  try {
    const analysis = await llmUtils.getObjectFromLLM(
      `Analyze the sentiment of this text: "${req.input.text}"`,
      sentimentSchema,
      LLMSize.SMALL
    );
    // Add sentiment to request context
    req.sentiment = analysis;
    await next();
  } catch (error) {
    await res.error(
      new Error(`Failed to analyze sentiment: ${error.message}`)
    );
  }
};
// Usage in framework
const framework = new AgentFramework();
framework.use(validateInput);
framework.use(analyzeSentiment); // Add sentiment analysis
framework.use(loadMemories);
framework.use(wrapContext);
framework.use(router);
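Once `analyzeSentiment` has run, downstream handlers can branch on `req.sentiment`. A hypothetical route sketch, on an agent configured as in the earlier examples (the threshold and reply copy are illustrative):

agent.addRoute({
  name: "handle_feedback",
  description: "Respond with extra care to strongly negative messages",
  handler: async (context, req, res) => {
    // Short-circuit for high-confidence negative sentiment
    if (req.sentiment?.sentiment === "negative" && req.sentiment.confidence > 0.8) {
      await res.send("I can tell this is frustrating. Let's fix it step by step.");
      return;
    }
    await res.send(await llmUtils.getTextFromLLM(context, "anthropic/claude-3-sonnet"));
  },
});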
TWITTER_USERNAME="your-username"
TWITTER_PASSWORD="your-password"
TWITTER_EMAIL="your-email"
TWITTER_2FA_SECRET="optional-2fa-secret"
TWITTER_POST_INTERVAL_HOURS=4
TWITTER_POLLING_INTERVAL=5 # minutes
TWITTER_DRY_RUN=true # For testing
// Configuration schema
const twitterConfigSchema = z.object({
  username: z.string().min(1, "Twitter username is required"),
  password: z.string().min(1, "Twitter password is required"),
  email: z.string().email("Valid email is required"),
  twoFactorSecret: z.string().optional(),
  retryLimit: z.number().int().min(1).default(5),
  postIntervalHours: z.number().int().min(1).default(4),
  enableActions: z.boolean().default(false),
});
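The schema can validate environment-derived values before the client starts; a sketch using zod's standard `safeParse`:

// Hypothetical: fail fast on an invalid Twitter config
const parsed = twitterConfigSchema.safeParse({
  username: process.env.TWITTER_USERNAME,
  password: process.env.TWITTER_PASSWORD,
  email: process.env.TWITTER_EMAIL,
  twoFactorSecret: process.env.TWITTER_2FA_SECRET,
});
if (!parsed.success) {
  console.error("Invalid Twitter config:", parsed.error.flatten().fieldErrors);
  process.exit(1);
}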
import { TwitterClient } from "@liz/twitter-client";
const config = {
  username: process.env.TWITTER_USERNAME,
  password: process.env.TWITTER_PASSWORD,
  email: process.env.TWITTER_EMAIL,
  twoFactorSecret: process.env.TWITTER_2FA_SECRET,
  retryLimit: 3,
  postIntervalHours: 4,
  pollingInterval: 5,
  dryRun: process.env.NODE_ENV !== "production",
};

const twitter = new TwitterClient(agent, config);
await twitter.start(); // Starts posting & monitoring intervals
// Automatic posting loop
async generateAndPost() {
  const responseText = await this.fetchTweetContent({
    agentId: this.agent.getAgentId(),
    userId: "twitter_client",
    roomId: "twitter",
    text: "<SYSTEM> Generate a new tweet to post on your timeline </SYSTEM>",
    type: "text",
  });

  const tweets = await sendThreadedTweet(this, responseText);

  // Store tweets in memory
  for (const tweet of tweets) {
    await storeTweetIfNotExists({
      id: tweet.id,
      text: tweet.text,
      userId: this.config.username,
      username: this.config.username,
      conversationId: tweet.conversationId,
      permanentUrl: tweet.permanentUrl,
    });
  }
}
// Check for new mentions
async checkInteractions() {
  const mentions = await this.getMentions();
  for (const mention of mentions) {
    if (mention.id <= this.lastCheckedTweetId) continue;
    await this.handleMention(mention);
    this.lastCheckedTweetId = mention.id;
  }
}

// Handle mention with agent
async handleMention(tweet) {
  const responseText = await this.fetchTweetContent({
    agentId: this.agent.getAgentId(),
    userId: `tw_user_${tweet.userId}`,
    roomId: tweet.conversationId || "twitter",
    text: `@${tweet.username}: ${tweet.text}`,
    type: "text",
  });
  const replies = await sendThreadedTweet(this, responseText, tweet.id);
}
// Split long content into tweets
function splitTweetContent(text, maxLength = 280) {
  if (text.length <= maxLength) return [text];

  const tweets = [];
  // Split on sentence boundaries so each tweet reads naturally
  const sentences = text.match(/[^.!?]+[.!?]+/g) || [text];
  let currentTweet = "";

  for (const sentence of sentences) {
    if ((currentTweet + sentence).length <= maxLength) {
      currentTweet += sentence;
    } else {
      tweets.push(currentTweet.trim());
      currentTweet = sentence;
    }
  }
  if (currentTweet) tweets.push(currentTweet.trim());
  return tweets;
}

// Send threaded tweets
async function sendThreadedTweet(client, content, replyToId) {
  const tweets = [];
  const parts = splitTweetContent(content);
  let lastTweetId = replyToId;

  for (const part of parts) {
    const tweet = await client.sendTweet(part, lastTweetId);
    tweets.push(tweet);
    lastTweetId = tweet.id;
    // Brief pause between tweets to avoid hammering the API
    await new Promise((resolve) => setTimeout(resolve, 1000));
  }
  return tweets;
}
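For example, a draft longer than 280 characters comes back as an ordered array of tweet-sized parts (the draft text here is hypothetical):

// Hypothetical: preview how a long draft will thread before posting
const draft = "First key point about the launch. Second point with more detail...";
splitTweetContent(draft).forEach((part, i) =>
  console.log(`Tweet ${i + 1} (${part.length} chars): ${part}`)
);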
// Store tweet in database
async function storeTweetIfNotExists(tweet) {
  const exists = await prisma.tweet.count({
    where: { id: tweet.id },
  });

  if (!exists) {
    await prisma.tweet.create({
      data: {
        id: tweet.id,
        text: tweet.text,
        userId: tweet.userId,
        username: tweet.username,
        conversationId: tweet.conversationId,
        inReplyToId: tweet.inReplyToId,
        permanentUrl: tweet.permanentUrl,
      },
    });
    return true;
  }
  return false;
}

// Get conversation thread
async function getTweetThread(conversationId) {
  return prisma.tweet.findMany({
    where: { conversationId },
    orderBy: { createdAt: "asc" },
  });
}