Added orchestration layer

This commit is contained in:
Storme-bit
2026-04-05 05:27:04 -07:00
parent 4b3f6455f9
commit eccda21992
5 changed files with 161 additions and 8 deletions

View File

@@ -0,0 +1,63 @@
const memory = require('./memory');
const inference = require('./inference');
const SYSTEM_PROMPT = `You are a helpful, context-aware AI assistant.
You have access to memories of past conversations with the user.
Use them to provide consistent, personalised responses.`;
const RECENT_EPISODE_LIMIT = 10; // Number of recent episodes to retrieve for context

/**
 * Assemble the full prompt sent to the model: system prompt, then any
 * recalled episodes, then the current user turn.
 *
 * BUG FIX: the parameter was previously named `getRecentEpisodes` while the
 * loop body iterated the undefined identifier `recentEpisodes`, throwing a
 * ReferenceError whenever at least one episode existed. The parameter is now
 * named `recentEpisodes` so the guard and the loop read the same binding.
 *
 * @param {Array<{user_message: string, ai_response: string}>} recentEpisodes
 *   Episodes from memory, ordered as returned by the store.
 * @param {string} userMessage - The current user turn.
 * @returns {string} Newline-joined prompt ending with "Assistant:".
 */
function buildPrompt(recentEpisodes, userMessage) {
  const parts = [SYSTEM_PROMPT];
  if (recentEpisodes.length > 0) {
    parts.push(`Here are some relevant memories from your past conversations:`);
    for (const ep of recentEpisodes) {
      parts.push(`User: ${ep.user_message}\nAssistant: ${ep.ai_response}`);
    }
    parts.push('--- End of recent memories ---\n');
  }
  parts.push(`User: ${userMessage}`);
  parts.push('Assistant:');
  return parts.join('\n');
}
/**
 * Run one conversational turn: resolve the session, recall context,
 * generate a reply, and persist the exchange back to memory.
 *
 * @param {string} externalId - Caller-supplied session identifier.
 * @param {string} userMessage - The user's message for this turn.
 * @param {object} [options] - Passed through to inference.complete.
 * @returns {Promise<{sessionId: string, response: string, model: string, tokenCount: number}>}
 */
async function chat(externalId, userMessage, options = {}) {
  // Look the session up by its external id; first contact creates it.
  const found = await memory.getSessionByExternalId(externalId);
  const session = found || (await memory.createSession(externalId));

  // Recall recent history and fold it into the prompt.
  const recentEpisodes = await memory.getRecentEpisodes(session.id, RECENT_EPISODE_LIMIT);
  const prompt = buildPrompt(recentEpisodes, userMessage);

  const result = await inference.complete(prompt, options);
  const tokenCount = (result.evalCount || 0) + (result.promptEvalCount || 0);

  // Fire-and-forget persistence: a storage failure is logged but must not
  // block or fail the reply to the caller.
  memory
    .createEpisode(session.id, userMessage, result.text, tokenCount)
    .catch((err) => console.error(`[orchestration] Failed to save episode`, err.message));

  return {
    // NOTE(review): echoes the caller's externalId rather than session.id —
    // looks intentional (round-trips the caller's handle), but confirm.
    sessionId: externalId,
    response: result.text,
    model: result.model,
    tokenCount,
  };
}
module.exports = { chat };