const { fetchOpenRouterModels } = require("../AiProviders/openRouter");
const { fetchApiPieModels } = require("../AiProviders/apipie");
const { perplexityModels } = require("../AiProviders/perplexity");
const { togetherAiModels } = require("../AiProviders/togetherAi");
const { fireworksAiModels } = require("../AiProviders/fireworksAi");
const { ElevenLabsTTS } = require("../TextToSpeech/elevenLabs");
const { fetchNovitaModels } = require("../AiProviders/novita");
const { parseLMStudioBasePath } = require("../AiProviders/lmStudio");
const { parseNvidiaNimBasePath } = require("../AiProviders/nvidiaNim");
const { GeminiLLM } = require("../AiProviders/gemini");
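
// Providers that support listing their available models, either through the
// provider's API or a bundled model catalog. Any other provider slug is
// rejected by getCustomModels below.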
const SUPPORT_CUSTOM_MODELS = [
  "openai",
  "localai",
  "ollama",
  "togetherai",
  "fireworksai",
  "nvidia-nim",
  "mistral",
  "perplexity",
  "openrouter",
  "lmstudio",
  "koboldcpp",
  "litellm",
  "elevenlabs-tts",
  "groq",
  "deepseek",
  "apipie",
  "novita",
  "xai",
  "gemini",
];
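
/**
 * Fetches the selectable models for a supported provider.
 * @param {string} provider - provider slug; must be in SUPPORT_CUSTOM_MODELS.
 * @param {string|boolean|null} apiKey - explicit API key, or `true` to reuse
 *   the key already stored in the environment (where the provider supports it).
 * @param {string|null} basePath - base URL for self-hosted providers.
 * @returns {Promise<{models: object[], error: string|null}>}
 */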
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
  if (!SUPPORT_CUSTOM_MODELS.includes(provider))
    return { models: [], error: "Invalid provider for custom models" };

  switch (provider) {
    case "openai":
      return await openAiModels(apiKey);
    case "localai":
      return await localAIModels(basePath, apiKey);
    case "ollama":
      return await ollamaAIModels(basePath);
    case "togetherai":
      return await getTogetherAiModels(apiKey);
    case "fireworksai":
      return await getFireworksAiModels(apiKey);
    case "mistral":
      return await getMistralModels(apiKey);
    case "perplexity":
      return await getPerplexityModels();
    case "openrouter":
      return await getOpenRouterModels();
    case "lmstudio":
      return await getLMStudioModels(basePath);
    case "koboldcpp":
      return await getKoboldCPPModels(basePath);
    case "litellm":
      return await liteLLMModels(basePath, apiKey);
    case "elevenlabs-tts":
      return await getElevenLabsModels(apiKey);
    case "groq":
      return await getGroqAiModels(apiKey);
    case "deepseek":
      return await getDeepSeekModels(apiKey);
    case "apipie":
      return await getAPIPieModels(apiKey);
    case "novita":
      return await getNovitaModels();
    case "xai":
      return await getXAIModels(apiKey);
    case "nvidia-nim":
      return await getNvidiaNimModels(basePath);
    case "gemini":
      return await getGeminiModels(apiKey);
    default:
      return { models: [], error: "Invalid provider for custom models" };
  }
}
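
/**
 * Lists OpenAI chat models for the given key, filtering out fine-tune,
 * vision, instruct, audio, and realtime variants. If the API call fails,
 * a static list of well-known GPT models is returned as a fallback.
 */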
async function openAiModels(apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const openai = new OpenAIApi({
    apiKey: apiKey || process.env.OPEN_AI_KEY,
  });
  const allModels = await openai.models
    .list()
    .then((results) => results.data)
    .catch((e) => {
      console.error(`OpenAI:listModels`, e.message);
      return [
        {
          name: "gpt-3.5-turbo",
          id: "gpt-3.5-turbo",
          object: "model",
          created: 1677610602,
          owned_by: "openai",
          organization: "OpenAi",
        },
        {
          name: "gpt-4o",
          id: "gpt-4o",
          object: "model",
          created: 1677610602,
          owned_by: "openai",
          organization: "OpenAi",
        },
        {
          name: "gpt-4",
          id: "gpt-4",
          object: "model",
          created: 1687882411,
          owned_by: "openai",
          organization: "OpenAi",
        },
        {
          name: "gpt-4-turbo",
          id: "gpt-4-turbo",
          object: "model",
          created: 1712361441,
          owned_by: "system",
          organization: "OpenAi",
        },
        {
          name: "gpt-4-32k",
          id: "gpt-4-32k",
          object: "model",
          created: 1687979321,
          owned_by: "openai",
          organization: "OpenAi",
        },
        {
          name: "gpt-3.5-turbo-16k",
          id: "gpt-3.5-turbo-16k",
          object: "model",
          created: 1683758102,
          owned_by: "openai-internal",
          organization: "OpenAi",
        },
      ];
    });

  const gpts = allModels
    .filter(
      (model) =>
        (model.id.includes("gpt") && !model.id.startsWith("ft:")) ||
        model.id.startsWith("o") // o1, o1-mini, o3, etc.
    )
    .filter(
      (model) =>
        !model.id.includes("vision") &&
        !model.id.includes("instruct") &&
        !model.id.includes("audio") &&
        !model.id.includes("realtime")
    )
    .map((model) => {
      return {
        ...model,
        name: model.id,
        organization: "OpenAi",
      };
    });

  const customModels = allModels
    .filter(
      (model) =>
        !model.owned_by.includes("openai") && model.owned_by !== "system"
    )
    .map((model) => {
      return {
        ...model,
        name: model.id,
        organization: "Your Fine-Tunes",
      };
    });

  // API key was valid, so save it for future use.
  if ((gpts.length > 0 || customModels.length > 0) && !!apiKey)
    process.env.OPEN_AI_KEY = apiKey;
  return { models: [...gpts, ...customModels], error: null };
}
async function localAIModels(basePath = null, apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const openai = new OpenAIApi({
    baseURL: basePath || process.env.LOCAL_AI_BASE_PATH,
    apiKey: apiKey || process.env.LOCAL_AI_API_KEY || null,
  });
  const models = await openai.models
    .list()
    .then((results) => results.data)
    .catch((e) => {
      console.error(`LocalAI:listModels`, e.message);
      return [];
    });

  // API key was valid, so save it for future use.
  if (models.length > 0 && !!apiKey) process.env.LOCAL_AI_API_KEY = apiKey;
  return { models, error: null };
}
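
// Note: `_apiKey === true` is a sentinel meaning "reuse the key already saved
// in the environment" rather than a literal key value. The same convention is
// used by the TogetherAI, xAI, and Gemini helpers below.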
async function getGroqAiModels(_apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const apiKey =
    _apiKey === true
      ? process.env.GROQ_API_KEY
      : _apiKey || process.env.GROQ_API_KEY || null;
  const openai = new OpenAIApi({
    baseURL: "https://api.groq.com/openai/v1",
    apiKey,
  });
  const models = (
    await openai.models
      .list()
      .then((results) => results.data)
      .catch((e) => {
        console.error(`GroqAi:listModels`, e.message);
        return [];
      })
  ).filter(
    (model) => !model.id.includes("whisper") && !model.id.includes("tool-use")
  );

  // API key was valid, so save it for future use.
  if (models.length > 0 && !!apiKey) process.env.GROQ_API_KEY = apiKey;
  return { models, error: null };
}
async function liteLLMModels(basePath = null, apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const openai = new OpenAIApi({
    baseURL: basePath || process.env.LITE_LLM_BASE_PATH,
    apiKey: apiKey || process.env.LITE_LLM_API_KEY || null,
  });
  const models = await openai.models
    .list()
    .then((results) => results.data)
    .catch((e) => {
      console.error(`LiteLLM:listModels`, e.message);
      return [];
    });

  // API key was valid, so save it for future use.
  if (models.length > 0 && !!apiKey) process.env.LITE_LLM_API_KEY = apiKey;
  return { models, error: null };
}
async function getLMStudioModels(basePath = null) {
  try {
    const { OpenAI: OpenAIApi } = require("openai");
    const openai = new OpenAIApi({
      baseURL: parseLMStudioBasePath(
        basePath || process.env.LMSTUDIO_BASE_PATH
      ),
      apiKey: null,
    });
    const models = await openai.models
      .list()
      .then((results) => results.data)
      .catch((e) => {
        console.error(`LMStudio:listModels`, e.message);
        return [];
      });
    return { models, error: null };
  } catch (e) {
    console.error(`LMStudio:getLMStudioModels`, e.message);
    return { models: [], error: "Could not fetch LMStudio Models" };
  }
}
async function getKoboldCPPModels(basePath = null) {
  try {
    const { OpenAI: OpenAIApi } = require("openai");
    const openai = new OpenAIApi({
      baseURL: basePath || process.env.KOBOLD_CPP_BASE_PATH,
      apiKey: null,
    });
    const models = await openai.models
      .list()
      .then((results) => results.data)
      .catch((e) => {
        console.error(`KoboldCPP:listModels`, e.message);
        return [];
      });
    return { models, error: null };
  } catch (e) {
    console.error(`KoboldCPP:getKoboldCPPModels`, e.message);
    return { models: [], error: "Could not fetch KoboldCPP Models" };
  }
}
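
// Ollama is queried through its native /api/tags endpoint rather than an
// OpenAI-compatible client, and each installed model is mapped to { id }.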
async function ollamaAIModels(basePath = null) {
  let url;
  try {
    let urlPath = basePath ?? process.env.OLLAMA_BASE_PATH;
    new URL(urlPath); // throws if not a valid URL
    if (urlPath.endsWith("/")) throw new Error("BasePath cannot end in /!");
    url = urlPath;
  } catch {
    return { models: [], error: "Not a valid URL." };
  }

  const models = await fetch(`${url}/api/tags`)
    .then((res) => {
      if (!res.ok)
        throw new Error(`Could not reach Ollama server! ${res.status}`);
      return res.json();
    })
    .then((data) => data?.models || [])
    .then((models) =>
      models.map((model) => {
        return { id: model.name };
      })
    )
    .catch((e) => {
      console.error(e);
      return [];
    });

  return { models, error: null };
}
async function getTogetherAiModels(apiKey = null) {
  const _apiKey =
    apiKey === true
      ? process.env.TOGETHER_AI_API_KEY
      : apiKey || process.env.TOGETHER_AI_API_KEY || null;
  try {
    const models = await togetherAiModels(_apiKey);
    if (models.length > 0 && !!_apiKey)
      process.env.TOGETHER_AI_API_KEY = _apiKey;
    return { models, error: null };
  } catch (error) {
    console.error("Error in getTogetherAiModels:", error);
    return { models: [], error: "Failed to fetch Together AI models" };
  }
}
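
// The helpers below normalize provider model catalogs (bundled or fetched
// remotely) into a common { id, name, ... } shape.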
async function getFireworksAiModels() {
  const knownModels = fireworksAiModels();
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      organization: model.organization,
      name: model.name,
    };
  });
  return { models, error: null };
}

async function getPerplexityModels() {
  const knownModels = perplexityModels();
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      name: model.name,
    };
  });
  return { models, error: null };
}

async function getOpenRouterModels() {
  const knownModels = await fetchOpenRouterModels();
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      organization: model.organization,
      name: model.name,
    };
  });
  return { models, error: null };
}

async function getNovitaModels() {
  const knownModels = await fetchNovitaModels();
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      organization: model.organization,
      name: model.name,
    };
  });
  return { models, error: null };
}

async function getAPIPieModels(apiKey = null) {
  const knownModels = await fetchApiPieModels(apiKey);
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels)
    .filter((model) => {
      // Filter for chat models only.
      return (
        model.subtype &&
        (model.subtype.includes("chat") || model.subtype.includes("chatx"))
      );
    })
    .map((model) => {
      return {
        id: model.id,
        organization: model.organization,
        name: model.name,
      };
    });
  return { models, error: null };
}
async function getMistralModels(apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const openai = new OpenAIApi({
    apiKey: apiKey || process.env.MISTRAL_API_KEY || null,
    baseURL: "https://api.mistral.ai/v1",
  });
  const models = await openai.models
    .list()
    .then((results) =>
      results.data.filter((model) => !model.id.includes("embed"))
    )
    .catch((e) => {
      console.error(`Mistral:listModels`, e.message);
      return [];
    });

  // API key was valid, so save it for future use.
  if (models.length > 0 && !!apiKey) process.env.MISTRAL_API_KEY = apiKey;
  return { models, error: null };
}
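
// ElevenLabs has no chat models; its voices are surfaced in the model shape,
// with the premade "Rachel" voice returned as a default when none load.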
async function getElevenLabsModels(apiKey = null) {
  const models = (await ElevenLabsTTS.voices(apiKey)).map((model) => {
    return {
      id: model.voice_id,
      organization: model.category,
      name: model.name,
    };
  });

  if (models.length === 0) {
    return {
      models: [
        {
          id: "21m00Tcm4TlvDq8ikWAM",
          organization: "premade",
          name: "Rachel (default)",
        },
      ],
      error: null,
    };
  }

  if (models.length > 0 && !!apiKey) process.env.TTS_ELEVEN_LABS_KEY = apiKey;
  return { models, error: null };
}
async function getDeepSeekModels(apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const openai = new OpenAIApi({
    apiKey: apiKey || process.env.DEEPSEEK_API_KEY,
    baseURL: "https://api.deepseek.com/v1",
  });
  const models = await openai.models
    .list()
    .then((results) => results.data)
    .then((models) =>
      models.map((model) => ({
        id: model.id,
        name: model.id,
        organization: model.owned_by,
      }))
    )
    .catch((e) => {
      console.error(`DeepSeek:listModels`, e.message);
      return [];
    });

  if (models.length > 0 && !!apiKey) process.env.DEEPSEEK_API_KEY = apiKey;
  return { models, error: null };
}
async function getXAIModels(_apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const apiKey =
    _apiKey === true
      ? process.env.XAI_LLM_API_KEY
      : _apiKey || process.env.XAI_LLM_API_KEY || null;
  const openai = new OpenAIApi({
    baseURL: "https://api.x.ai/v1",
    apiKey,
  });
  const models = await openai.models
    .list()
    .then((results) => results.data)
    .catch((e) => {
      console.error(`XAI:listModels`, e.message);
      // Fall back to a known default model if the API call fails.
      return [
        {
          created: 1725148800,
          id: "grok-beta",
          object: "model",
          owned_by: "xai",
        },
      ];
    });

  // API key was valid, so save it for future use.
  if (models.length > 0 && !!apiKey) process.env.XAI_LLM_API_KEY = apiKey;
  return { models, error: null };
}
async function getNvidiaNimModels(basePath = null) {
  try {
    const { OpenAI: OpenAIApi } = require("openai");
    const openai = new OpenAIApi({
      baseURL: parseNvidiaNimBasePath(
        basePath ?? process.env.NVIDIA_NIM_LLM_BASE_PATH
      ),
      apiKey: null,
    });
    const modelResponse = await openai.models
      .list()
      .then((results) => results.data)
      .catch((e) => {
        throw new Error(e.message);
      });

    const models = modelResponse.map((model) => {
      return {
        id: model.id,
        name: model.id,
        organization: model.owned_by,
      };
    });
    return { models, error: null };
  } catch (e) {
    console.error(`NVIDIA NIM:getNvidiaNimModels`, e.message);
    return { models: [], error: "Could not fetch NVIDIA NIM Models" };
  }
}
async function getGeminiModels(_apiKey = null) {
  const apiKey =
    _apiKey === true
      ? process.env.GEMINI_API_KEY
      : _apiKey || process.env.GEMINI_API_KEY || null;
  const models = await GeminiLLM.fetchModels(apiKey);

  // API key was valid, so save it for future use.
  if (models.length > 0 && !!apiKey) process.env.GEMINI_API_KEY = apiKey;
  return { models, error: null };
}
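
// Example usage (a sketch; the base path shown assumes a local Ollama
// instance on its default port):
//   const { models, error } = await getCustomModels(
//     "ollama",
//     null,
//     "http://127.0.0.1:11434"
//   );
//   if (!error) console.log(models.map((m) => m.id));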
module.exports = {
  getCustomModels,
};