The `handleConversation` function is an asynchronous function that handles a conversation by classifying the prompt, storing the user message, generating a response from a language model, and storing the final response. It takes five parameters — `promptModel`, `session`, `prompt`, `image`, and `otr` — and returns the final response after processing the language model's output and storing the relevant information.
npm run import -- "handle conversation"
const classifyPrompt = importer.import("add conversation context")
const storeResponse = importer.import("store llm response")
/**
 * Handles one conversation turn: classifies the prompt for context, stores
 * the user message, queries the language model, rewrites any image markdown
 * in the reply to point at the generated image, and stores the final reply.
 *
 * @param {Function} promptModel - Async callable that sends a prompt to the LLM and resolves to its text reply.
 * @param {object} session - Conversation session; when falsy the reply is not stored.
 * @param {string} prompt - The user's message.
 * @param {string|Buffer} image - Optional image accompanying the prompt.
 * @param {object} otr - Off-the-record flag/state forwarded to storage.
 * @returns {Promise<string>} The language model's reply text.
 */
async function handleConversation(promptModel, session, prompt, image, otr) {
  // Matches markdown image syntax: ![alt](url "optional title")
  const imageMarkdown = /!\[[^\]]*\]\((.*?)\s*("(?:.*[^"])")?\s*\)/gi
  // Gather conversational context (content, memories, image info) for this prompt.
  let context = await classifyPrompt(promptModel, session, prompt, image)
  // Record the incoming user message together with the derived context.
  let messageStorage = await storeResponse(true, session, prompt,
    context.content + (context.memories ? ('\nApply any relevant memories to your response:\n' + context.memories.join('\n')) : ''),
    otr)
  // Build the LLM prompt: context, emotion hint, the user message, and memories.
  let q1 = (context && context.content ? context.content : '')
    + (context.image ? '\nThe response image is provided.\n' : '')
    + '\nRespond to this message and pretend to be emotional (e.g. '
    + messageStorage.emotions.join(', ') + '):\n'
    + prompt
    + (context.memories ? ('\nApply any relevant memories to your response:\n' + context.memories.join('\n')) : '')
  console.log('User: ' + q1)
  let result = await promptModel(q1)
  if(context.imagePath) {
    // TODO: accommodate discord by removing the markdown,
    // or provide a server address to brian-chat,
    // or return the entire context with result to doMention().
    if(result.match(imageMarkdown)) {
      // The model emitted image markdown — repoint it at the generated image.
      // NOTE(review): replacement string reconstructed from a garbled source — confirm intent.
      result = result.replace(imageMarkdown, '![image](' + context.imagePath + ')')
    } else {
      // No image markdown in the reply — append a link to the generated image.
      result += '\n\n![image](' + context.imagePath + ')'
    }
  }
  if(session) {
    // Persist the assistant's reply in the same session.
    await storeResponse(false, session, result, void 0, otr)
  }
  return result
}

module.exports = handleConversation
const classifyPrompt = require('./add-conversation-context');
const storeResponse = require('./store-llm-response');
/**
 * Handles a conversation with the given prompt, image, and other relevant information.
 *
 * Classifies the prompt for context, stores the user message, queries the
 * language model, rewrites any image markdown in the reply to point at the
 * generated image, and stores the final reply when a session is active.
 *
 * @param {object} promptModel - The model to use for processing the prompt.
 * @param {object} session - The session object containing the conversation context; when falsy the reply is not stored.
 * @param {string} prompt - The prompt to process.
 * @param {string|Buffer} image - The image associated with the prompt.
 * @param {object} otr - The one-time reply information.
 *
 * @returns {Promise<string>} The response from the language model.
 * @throws {Error} If the prompt cannot be classified, or if the model call fails.
 */
async function handleConversation(promptModel, session, prompt, image, otr) {
  // Matches markdown image syntax: ![alt](url "optional title")
  const imageMarkdown = /!\[[^\]]*\]\((.*?)\s*("(?:.*[^"])")?\s*\)/gi;
  // Classify the prompt to gather necessary context
  const classification = await classifyPrompt(promptModel, session, prompt, image);
  if (!classification) {
    throw new Error('Failed to classify prompt');
  }
  // Store the current response in the message storage
  const messageStorage = await storeResponse(true, session, prompt,
    classification.content + (classification.memories ? ('\nApply any relevant memories to your response:\n' + classification.memories.join('\n')) : ''),
    otr);
  // Construct the question for the language model
  const question = (classification.content ? classification.content : '')
    + (classification.image ? '\nThe response image is provided.\n' : '')
    + '\nRespond to this message and pretend to be emotional (e.g. '
    + messageStorage.emotions.join(', ') + '):\n'
    + prompt
    + (classification.memories ? ('\nApply any relevant memories to your response:\n' + classification.memories.join('\n')) : '');
  // Log the question for debugging purposes
  console.log(`User: ${question}`);
  try {
    // Get the response from the language model.
    // Must be `let`: the image branch below reassigns it (was `const`, which
    // threw a TypeError at runtime whenever an image path was present).
    let response = await promptModel(question);
    // If an image is associated with the prompt, add a markdown link to the response
    if (classification.imagePath) {
      if (response.match(imageMarkdown)) {
        // The model emitted image markdown — repoint it at the generated image.
        // NOTE(review): replacement string reconstructed from a garbled source — confirm intent.
        response = response.replace(imageMarkdown, `![image](${classification.imagePath})`);
      } else {
        // No image markdown in the reply — append a link to the generated image.
        response += `\n\n![image](${classification.imagePath})`;
      }
    }
    // If a session is provided, store the response
    if (session) {
      await storeResponse(false, session, response, void 0, otr);
    }
    return response;
  } catch (error) {
    // Surface the failure in the log, then propagate to the caller.
    console.error(error);
    throw error;
  }
}

module.exports = handleConversation;
const classifyPrompt = importer.import('add conversation context')
const storeResponse = importer.import('store llm response')
`classifyPrompt` and `storeResponse` are helpers loaded through the importer: `classifyPrompt` adds conversation context, while `storeResponse` stores responses from the language model.

```javascript
async function handleConversation(promptModel, session, prompt, image, otr) {
//...
}
The `handleConversation` function takes five parameters: `promptModel`, `session`, `prompt`, `image`, and `otr`.

1. **Classifying the Prompt**

```javascript
let context = await classifyPrompt(promptModel, session, prompt, image)
* The function `classifyPrompt` is called with the provided parameters to classify the prompt.
* The result is stored in the variable `context`.
2. **Storing the Response**
```javascript
let messageStorage = await storeResponse(true, session, prompt,
context.content + (context.memories? ('\nApply any relevant memories to your response:\n' + context.memories.join('\n')) : ''),
otr)
The `storeResponse` function is called with the provided parameters to store the user message, and the result is stored in `messageStorage`.

3. **Generating the Response**

```javascript
let q1 = (context && context.content? context.content : '')
* A string `q1` is generated by combining various parts of the prompt and context.
* The response from the language model will be generated based on `q1`.
4. **Calling the Language Model**
```javascript
let result = await promptModel(q1)
The language model is called with `q1` to produce a response, which is stored in `result`.

5. **Processing the Response**

```javascript
if(context.imagePath) {
//...
}
* If an image path is present in the context, the response is processed accordingly.
* If the response contains an image link, it is replaced with the correct link.
* If the response does not contain an image link, it is added at the end.
6. **Storing the Final Response**
```javascript
if(session) {
await storeResponse(false, session, result, void 0, otr)
}
If a session is active, the final response is stored using the `storeResponse` function.

7. **Returning the Final Response**

```javascript
return result
* The final response is returned by the function.
### Export Statement
```javascript
module.exports = handleConversation
handleConversation
function is exported as a module.