Files
nexusAI/packages/orchestration-service/src/chat/index.js
2026-04-05 05:46:06 -07:00

63 lines
2.0 KiB
JavaScript

const memory = require('../services/memory');
const inference = require('../services/inference');
// System prompt prepended to every request sent to the inference service.
// NOTE: this text is part of runtime behavior — changing it changes model output.
const SYSTEM_PROMPT = `You are a helpful, context-aware AI assistant.
You have access to memories of past conversations with the user.
Use them to provide consistent, personalised responses.`;
const RECENT_EPISODE_LIMIT = 10; // Number of recent episodes to retrieve for context
/**
 * Assemble the full text prompt sent to the inference backend.
 *
 * Layout: system prompt, then (when any episodes exist) a memory section of
 * past User/Assistant exchanges, then the current user message and a trailing
 * "Assistant:" cue for the model to continue from.
 *
 * @param {Array<{user_message: string, ai_response: string}>} recentEpisodes - past exchanges, oldest-first as provided by the caller
 * @param {string} userMessage - the current user turn
 * @returns {string} newline-joined prompt
 */
function buildPrompt(recentEpisodes, userMessage) {
  const sections = [SYSTEM_PROMPT];
  if (recentEpisodes.length > 0) {
    const exchanges = recentEpisodes.map(
      (episode) => `User: ${episode.user_message}\nAssistant: ${episode.ai_response}`
    );
    sections.push(
      'Here are some relevant memories from your past conversations:',
      ...exchanges,
      '--- End of recent memories ---\n'
    );
  }
  sections.push(`User: ${userMessage}`, 'Assistant:');
  return sections.join('\n');
}
/**
 * Run one chat turn for an externally-identified session.
 *
 * Resolves (or creates) the session, pulls up to RECENT_EPISODE_LIMIT past
 * episodes for context, runs inference, and persists the new episode as a
 * best-effort background write (a failed write is logged, never surfaced).
 *
 * @param {string} externalId - caller-supplied session identifier
 * @param {string} userMessage - the current user turn
 * @param {object} [options] - passed through verbatim to inference.complete
 * @returns {Promise<{sessionId: string, response: string, model: string, tokenCount: number}>}
 */
async function chat(externalId, userMessage, options = {}) {
  // 1. Resolve or create session
  let session = await memory.getSessionByExternalId(externalId);
  if (!session) {
    session = await memory.createSession(externalId);
  }

  // 2. Fetch recent episodes for context
  const recentEpisodes = await memory.getRecentEpisodes(
    session.id,
    RECENT_EPISODE_LIMIT
  );

  // 3. Assemble prompt and run inference
  const prompt = buildPrompt(recentEpisodes, userMessage);
  const result = await inference.complete(prompt, options);

  // 4. Total tokens = completion + prompt tokens. Computed once — the original
  // duplicated this expression in the episode write and the return value.
  const tokenCount = (result.evalCount ?? 0) + (result.promptEvalCount ?? 0);

  // 5. Write episode back to memory (fire-and-forget by design; log the full
  // error so the stack trace is preserved, not just err.message).
  memory
    .createEpisode(session.id, userMessage, result.text, tokenCount)
    .catch((err) => console.error(`[orchestration] Failed to save episode`, err));

  // 6. Return response (sessionId echoes the caller's external id, not session.id)
  return {
    sessionId: externalId,
    response: result.text,
    model: result.model,
    tokenCount,
  };
}
module.exports = { chat };