The doDistill function is an asynchronous workflow manager that coordinates LLM modules and the Discord API to generate and post responses to user prompts. It uses conditional checks to pick the next step in the workflow: first deciding whether the prompt needs a short or long answer, then selecting an LLM tool and generating the response accordingly.
npm run import -- "discord llm connector"
const {postMessageImageAttachment} = importer.import("create message image attachments")
const {triggerTyping, createMessage, updateInteraction} = importer.import("discord api")
const {Remarkable} = require('remarkable');
const md = new Remarkable({html: true, xhtmlOut: true, breaks: true});
const { safeurl } = importer.import("domain cache tools")
const selectModel = importer.import("select llm")
const askLlamaToWriteStory = importer.import("write creatively llm")
const askLlamaToWriteBusinessPlan = importer.import("business plan llm")
const askLlamaWriteEssay = importer.import("research paper llm")
async function doDistill(interaction, promptModel = 'DeepSeek') {
  // Resolve a model name to a callable prompt function
  if(typeof promptModel != 'function') {
    promptModel = await selectModel(promptModel)
  }
  console.log('using model', promptModel.name)
  await triggerTyping(interaction.channel_id)

  // First gate: does the prompt call for a long answer?
  let q1 = 'Is this prompt looking for a very long answer, Yes or No?\n' + interaction.data.options[0].value
    + '\nOnly respond with Yes or No and nothing else, no reasoning or instructions.'
  if((await promptModel(q1)).match(/no/gi)) {
    let a1 = await promptModel(interaction.data.options[0].value)
    return await updateInteraction(a1.substring(0, 1800), interaction.id, interaction.token)
  }

  await updateInteraction('This could take a while...', interaction.id, interaction.token)

  // Second gate: which long-form tool fits the prompt?
  let q2 = 'Should I use the tool "essay" writer, "business plan", "creative story", or "default" prompter for this response?\n' + interaction.data.options[0].value + '\nAnswer "essay", "business", or "story" and nothing else, no explanation or instructions.'
  let a2 = await promptModel(q2)
  console.log('AI: ' + a2)
  if(a2.match(/essay/gi)) {
    let essay = await askLlamaWriteEssay(interaction.data.options[0].value, null, promptModel)
    return await postMessageImageAttachment(interaction.data.options[0].value, Buffer.from(essay), interaction.channel_id, 'text/html')
  } else if(a2.match(/business/gi)) {
    let plan = await askLlamaToWriteBusinessPlan(interaction.data.options[0].value, null, promptModel)
    // TODO: already writes to business-plans
    return await postMessageImageAttachment(interaction.data.options[0].value, Buffer.from(plan), interaction.channel_id, 'text/html')
  } else if(a2.match(/story/gi)) {
    let story = await askLlamaToWriteStory(interaction.data.options[0].value, null, promptModel)
    return await postMessageImageAttachment(interaction.data.options[0].value, Buffer.from(story), interaction.channel_id, 'text/html')
  } else {
    // Default: answer directly and render the markdown answer as HTML
    let answer = await promptModel(interaction.data.options[0].value)
    const mdHtml = md.render(answer)
    return await postMessageImageAttachment(interaction.data.options[0].value, Buffer.from(mdHtml), interaction.channel_id, 'text/html')
  }
}
async function doReason(interaction) {
  return doDistill(interaction, 'Qwen')
}

async function doInstruct(interaction) {
  return doDistill(interaction, 'Llama-3-70B')
}

async function doMistral(interaction) {
  return doDistill(interaction, 'Mistral')
}

module.exports = {
  doDistill,
  doReason,
  doInstruct,
  doMistral,
}
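For context, here is how these exported handlers might be wired up to incoming slash commands. This is a minimal sketch: the handler map, the command names, and the payload shape shown in the comment are illustrative assumptions, not the full Discord schema.

// Hypothetical dispatcher: route slash-command payloads to the matching handler
const { doDistill, doReason, doInstruct, doMistral } = importer.import('discord llm connector')

const handlers = {
  distill: doDistill,
  reason: doReason,
  instruct: doInstruct,
  mistral: doMistral,
}

async function onInteraction(interaction) {
  const handler = handlers[interaction.data.name]
  if(handler) {
    return await handler(interaction)
  }
}

// The handlers only rely on this slice of the interaction payload:
// { id, token, channel_id, data: { name: 'distill', options: [{ value: 'Explain monads' }] } }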
// Import required modules and functions
const importer = require('./importer');
const { postMessageImageAttachment } = importer.import('create message image attachments');
const { triggerTyping, createMessage, updateInteraction } = importer.import('discord api');
const { safeurl } = importer.import('domain cache tools');
const selectModel = importer.import('select llm');
const askLlamaToWriteStory = importer.import('write creatively llm');
const askLlamaToWriteBusinessPlan = importer.import('business plan llm');
const askLlamaWriteEssay = importer.import('research paper llm');

const { Remarkable } = require('remarkable');
const md = new Remarkable({ html: true, xhtmlOut: true, breaks: true });
/**
* Determines the response to a prompt based on the chosen model and prompt content.
*
* @param {Object} interaction - The Discord interaction containing prompt and options.
* @param {string} [promptModel='DeepSeek'] - The model to use for generating the response.
* @returns {Promise} - A promise resolving to the generated response.
*/
async function doDistill(interaction, promptModel = 'DeepSeek') {
  if (typeof promptModel !== 'function') {
    promptModel = await selectModel(promptModel);
  }
  console.log(`Using model: ${promptModel.name}`);

  // Trigger typing indicator to show that the model is working
  await triggerTyping(interaction.channel_id);

  // Determine if the prompt requires a short or long response
  const q1 = `Is this prompt looking for a very long answer, Yes or No?\n${interaction.data.options[0].value}\nOnly respond with Yes or No and nothing else.`;
  const a1 = await promptModel(q1);
  if (a1.match(/no/gi)) {
    // Short response: answer the prompt directly and update the interaction
    const answer = await promptModel(interaction.data.options[0].value);
    return updateInteraction(answer.substring(0, 1800), interaction.id, interaction.token);
  }

  // Long response: update the interaction to show progress
  await updateInteraction('This could take a while...', interaction.id, interaction.token);

  // Determine which tool to use for generating the response
  const q2 = `Should I use the tool "essay" writer, "business plan", "creative story", or "default" prompter for this response?\n${interaction.data.options[0].value}\nAnswer "essay", "business", or "story" and nothing else.`;
  const a2 = await promptModel(q2);
  console.log(`AI: ${a2}`);

  // Normalize the model's reply to one of the known tool names
  const tool = (a2.match(/essay|business|story/i) || ['default'])[0].toLowerCase();
  let html;
  switch (tool) {
    case 'essay':
      html = await askLlamaWriteEssay(interaction.data.options[0].value, null, promptModel);
      break;
    case 'business':
      html = await askLlamaToWriteBusinessPlan(interaction.data.options[0].value, null, promptModel);
      break;
    case 'story':
      html = await askLlamaToWriteStory(interaction.data.options[0].value, null, promptModel);
      break;
    default:
      // The direct answer is markdown, so render it to HTML first
      html = md.render(await promptModel(interaction.data.options[0].value));
      break;
  }

  // Post the HTML document to the channel as an attachment
  return postMessageImageAttachment(interaction.data.options[0].value, Buffer.from(html), interaction.channel_id, 'text/html');
}
/**
 * A wrapper around doDistill that uses the Qwen model.
 *
 * @param {Object} interaction - The Discord interaction containing prompt and options.
 * @returns {Promise} - A promise resolving to the generated response.
 */
async function doReason(interaction) {
  return doDistill(interaction, 'Qwen');
}

/**
 * A wrapper around doDistill that uses the Llama-3-70B model.
 *
 * @param {Object} interaction - The Discord interaction containing prompt and options.
 * @returns {Promise} - A promise resolving to the generated response.
 */
async function doInstruct(interaction) {
  return doDistill(interaction, 'Llama-3-70B');
}

/**
 * A wrapper around doDistill that uses the Mistral model.
 *
 * @param {Object} interaction - The Discord interaction containing prompt and options.
 * @returns {Promise} - A promise resolving to the generated response.
 */
async function doMistral(interaction) {
  return doDistill(interaction, 'Mistral');
}
module.exports = {
doDistill,
doReason,
doInstruct,
doMistral,
};
Code Breakdown
The code imports various functions and tools from other modules using the importer object. The imported functions include:

- postMessageImageAttachment from create message image attachments
- triggerTyping, createMessage, and updateInteraction from discord api
- safeurl from domain cache tools
- selectModel from select llm
- askLlamaToWriteStory, askLlamaToWriteBusinessPlan, and askLlamaWriteEssay from the various LLM (Large Language Model) writing modules

The code also creates an instance of the Remarkable markdown parser, configured for HTML passthrough, XHTML-style output, and hard line breaks.
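For illustration, here is what that configuration does to a small markdown snippet (standard remarkable API; the output shown is approximately what these options produce):

const { Remarkable } = require('remarkable')
const md = new Remarkable({ html: true, xhtmlOut: true, breaks: true })

// breaks: true turns single newlines into <br />; xhtmlOut self-closes tags;
// html: true lets raw HTML in the answer pass through untouched.
md.render('**bold** text\nnext line')
// => '<p><strong>bold</strong> text<br />\nnext line</p>\n'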
The doDistill function is an asynchronous function that takes an interaction object and an optional promptModel parameter. It acts as a workflow manager that coordinates the LLM modules and the Discord API.

The function uses a series of conditional statements to determine the next step in the workflow. The main logic flow is:

1. If promptModel is a model name rather than a function, resolve it to a callable model with the selectModel function.
2. Trigger the typing indicator on the interaction's channel.
3. Ask the model whether the prompt calls for a very long answer; if not, answer the prompt directly and update the interaction with the first 1,800 characters of the response.
4. Otherwise, post a progress message, ask the model which tool fits best (essay, business plan, creative story, or the default prompter), run the matching generator, and post the result to the channel as an HTML attachment.
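Distilled to its essence, the routing is a two-stage classification: a yes/no gate on answer length, then a tool picker. A minimal sketch of that pattern, assuming promptModel is any async function that maps a prompt string to a response string:

async function routePrompt(prompt, promptModel) {
  // Stage 1: gate on expected answer length
  const gate = await promptModel('Is this prompt looking for a very long answer, Yes or No?\n' + prompt)
  if(gate.match(/no/gi)) {
    return { tool: 'default', long: false }
  }
  // Stage 2: pick a specialized writer for long answers
  const choice = await promptModel('Should I use "essay", "business plan", or "creative story"?\n' + prompt)
  const tool = (choice.match(/essay|business|story/i) || ['default'])[0].toLowerCase()
  return { tool, long: true }
}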