const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { v4: uuidv4 } = require("uuid");
const {
  writeResponseChunk,
  clientAbortedHandler,
  formatChatHistory,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const {
  LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const cacheFolder = path.resolve(
  process.env.STORAGE_DIR
    ? path.resolve(process.env.STORAGE_DIR, "models", "apipie")
    : path.resolve(__dirname, `../../../storage/models/apipie`)
);
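
/**
 * LLM provider for the ApiPie (https://apipie.ai) OpenAI-compatible API.
 * Model metadata is fetched once and cached on disk (and refreshed when stale)
 * so that per-model context windows can be resolved synchronously in the constructor.
 */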
class ApiPieLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.APIPIE_LLM_API_KEY)
      throw new Error("No ApiPie LLM API key was set.");
    const { OpenAI: OpenAIApi } = require("openai");
    this.basePath = "https://apipie.ai/v1";
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
    });
    this.model =
      modelPreference ||
      process.env.APIPIE_LLM_MODEL_PREF ||
      "openrouter/mistral-7b-instruct";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
  }
  log(text, ...args) {
    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
  }
  // Checks if the .cached_at file holds a timestamp that is more than one week
  // (in ms) older than the current date. If it is, we re-fetch from the API so
  // that the model list stays up to date.
  #cacheIsStale() {
    const MAX_STALE = 6.048e8; // 1 week in ms
    if (!fs.existsSync(this.cacheAtPath)) return true;
    const now = Number(new Date());
    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
    return now - timestampMs > MAX_STALE;
  }
  // Fetches the models from the ApiPie API and caches them locally.
  // We do this because ApiPie exposes a large number of models and we need the
  // proper token context window for each one. The context window is a
  // constructor property, so we can only resolve it when this cache exists.
  // We used to run this as a chore, but since there is an API for the info,
  // that makes little sense. This may slow down the first request, but every
  // request afterward reads from the local cache.
  async #syncModels() {
    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
      return false;
    this.log("Model cache is not present or stale. Fetching from ApiPie API.");
    await fetchApiPieModels();
    return;
  }
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
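
  /**
   * Returns the full map of locally cached ApiPie models,
   * or an empty object when the cache file does not exist yet.
   * @returns {Record<string, {id:string, name:string, organization:string, subtype:string, maxLength:number}>}
   */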
  models() {
    if (!fs.existsSync(this.cacheModelPath)) return {};
    return safeJsonParse(
      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
      {}
    );
  }
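
  /**
   * Returns only the cached models whose subtype marks them as
   * chat-capable ("chat" or "chatx").
   */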
  chatModels() {
    const allModels = this.models();
    return Object.entries(allModels).reduce(
      (chatModels, [modelId, modelInfo]) => {
        // Filter for chat models
        if (
          modelInfo.subtype &&
          (modelInfo.subtype.includes("chat") ||
            modelInfo.subtype.includes("chatx"))
        ) {
          chatModels[modelId] = modelInfo;
        }
        return chatModels;
      },
      {}
    );
  }
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
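
  /**
   * Static lookup of a model's context window from the on-disk cache.
   * Falls back to 4096 when the model (or the cache itself) is unknown.
   * @param {string} modelName
   * @returns {number}
   */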
  static promptWindowLimit(modelName) {
    const cacheModelPath = path.resolve(cacheFolder, "models.json");
    const availableModels = fs.existsSync(cacheModelPath)
      ? safeJsonParse(
          fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
          {}
        )
      : {};
    return availableModels[modelName]?.maxLength || 4096;
  }
  promptWindowLimit() {
    const availableModels = this.chatModels();
    return availableModels[this.model]?.maxLength || 4096;
  }
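
  /**
   * Checks that the given model is a known chat-capable ApiPie model.
   * Ensures the local model cache exists (re-syncing when stale) before checking.
   * @param {string} model
   * @returns {Promise<boolean>}
   */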
  async isValidChatCompletionModel(model = "") {
    await this.#syncModels();
    const availableModels = this.chatModels();
    return availableModels.hasOwnProperty(model);
  }
  /**
   * Generates the appropriate content payload for a message + attachments.
   * @param {{userPrompt: string, attachments: import("../../helpers").Attachment[]}} props
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "auto",
        },
      });
    }
    return content.flat();
  }
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }
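
  /**
   * Performs a blocking (non-streaming) chat completion and returns the
   * response text with token-usage metrics from the performance monitor.
   * @param {object[]|null} messages - OpenAI-format message array
   * @param {{temperature?: number}} options
   * @returns {Promise<{textResponse: string, metrics: object}|null>}
   */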
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `ApiPie chat: ${this.model} is not valid for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage?.prompt_tokens || 0,
        completion_tokens: result.output.usage?.completion_tokens || 0,
        total_tokens: result.output.usage?.total_tokens || 0,
        outputTps:
          (result.output.usage?.completion_tokens || 0) / result.duration,
        duration: result.duration,
      },
    };
  }
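
  /**
   * Opens a streaming chat completion wrapped by the performance monitor
   * so token throughput can be measured while chunks are consumed.
   * @param {object[]|null} messages - OpenAI-format message array
   * @param {{temperature?: number}} options
   */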
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `ApiPie chat: ${this.model} is not valid for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
      this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages
    );
    return measuredStreamRequest;
  }
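
  /**
   * Pipes a measured stream back to the client as textResponseChunk events,
   * handling client aborts and closing the token measurement when the stream ends.
   * @param {object} response - Express response used to write chunks to the client
   * @param {object} stream - measured stream from streamGetChatCompletion
   * @param {{uuid?: string, sources?: object[]}} responseProps
   * @returns {Promise<string>} the full generated text
   */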
  handleStream(response, stream, responseProps) {
    const { uuid = uuidv4(), sources = [] } = responseProps;

    return new Promise(async (resolve) => {
      let fullText = "";

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => {
        stream?.endMeasurement({
          completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
        });
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);

      try {
        for await (const chunk of stream) {
          const message = chunk?.choices?.[0];
          const token = message?.delta?.content;

          if (token) {
            fullText += token;
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: token,
              close: false,
              error: false,
            });
          }

          if (message === undefined || message.finish_reason !== null) {
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            response.removeListener("close", handleAbort);
            stream?.endMeasurement({
              completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
            });
            resolve(fullText);
          }
        }
      } catch (e) {
        writeResponseChunk(response, {
          uuid,
          sources,
          type: "abort",
          textResponse: null,
          close: true,
          error: e.message,
        });
        response.removeListener("close", handleAbort);
        stream?.endMeasurement({
          completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
        });
        resolve(fullText);
      }
    });
  }
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
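
  /**
   * Builds the prompt via constructPrompt and compresses the message history
   * so the final payload fits within the model's context window limits.
   */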
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
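
/**
 * Fetches the model list from the ApiPie API, keys each entry by its
 * `${provider}/${model}` id, writes the result (plus a .cached_at timestamp)
 * into the local cache folder, and returns the model map.
 * Returns an empty object on failure.
 * @param {string|null} providedApiKey - optional key to use instead of the env var
 */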
async function fetchApiPieModels(providedApiKey = null) {
  const apiKey = providedApiKey || process.env.APIPIE_LLM_API_KEY || null;
  return await fetch(`https://apipie.ai/v1/models`, {
    method: "GET",
    headers: {
      "Content-Type": "application/json",
      ...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
    },
  })
    .then((res) => res.json())
    .then(({ data = [] }) => {
      const models = {};
      data.forEach((model) => {
        models[`${model.provider}/${model.model}`] = {
          id: `${model.provider}/${model.model}`,
          name: `${model.provider}/${model.model}`,
          organization: model.provider,
          subtype: model.subtype,
          maxLength: model.max_tokens,
        };
      });

      // Cache all response information
      if (!fs.existsSync(cacheFolder))
        fs.mkdirSync(cacheFolder, { recursive: true });
      fs.writeFileSync(
        path.resolve(cacheFolder, "models.json"),
        JSON.stringify(models),
        {
          encoding: "utf-8",
        }
      );
      fs.writeFileSync(
        path.resolve(cacheFolder, ".cached_at"),
        String(Number(new Date())),
        {
          encoding: "utf-8",
        }
      );

      return models;
    })
    .catch((e) => {
      console.error(e);
      return {};
    });
}
module.exports = {
  ApiPieLLM,
  fetchApiPieModels,
};