The code imports constants and defines two functions, askLlamaMatchTimestamps and matchingTimestamps, to process timestamps and generate responses based on keywords and prompts. The matchingTimestamps function iterates over conversations, builds context messages, and calls askLlamaMatchTimestamps to match timestamps against the generated responses, returning an array of matching timestamps.
npm run import -- "relevant history timestamps"
const {ACTIVE_CONVERSATIONS, PROJECT_PATH, DEFAULT_MODEL} = importer.import("general chit chat")
/**
 * Ask the LLM which of the candidate timestamps relate to the given
 * keywords and prompt, then keep only the timestamps its reply mentions.
 * @param {function(string): Promise<string>} promptModel - sends a prompt to the LLM, resolves to its reply
 * @param {string} messages - accumulated context ("recent topics") block
 * @param {string[]} keywords - keywords listed at the top of the prompt
 * @param {string} prompt - user prompt appended after the context
 * @param {string[]} timestamps - candidate epoch-millisecond timestamp strings
 * @returns {Promise<string[]>} timestamps referenced in the reply, in input order
 */
async function askLlamaMatchTimestamps(promptModel, messages, keywords, prompt, timestamps) {
let q3 = 'Given the following keywords:\n'
+ keywords.join('\n')
+ '\n' + messages
+ prompt + '\nOnly respond with related and unique timestamps, no explanations.'
console.log('User: ' + q3)
let a3 = await promptModel(q3)
console.log('AI: ' + a3)
// Use substring checks instead of String.match(): match() compiles its
// argument as a regex, so '.' in the ISO date would match any character
// and regex metacharacters in a timestamp string would throw or misfire.
return timestamps
.filter(time => a3.includes(time) || a3.includes(new Date(parseInt(time)).toISOString()))
}
/**
 * Walk the cached conversations for this session, collect the timestamps
 * whose entries mention any of the keywords (or appear in the prompt), and
 * let the LLM confirm which ones are relevant. Context is flushed to the
 * model in batches whenever it grows past 2048 characters.
 * @param {function(string): Promise<string>} promptModel - LLM prompt function
 * @param {string} session - session ID used to select conversation files
 * @param {string} prompt - user prompt to match against
 * @param {string[]} keywords - keywords to match against entry keywords
 * @returns {Promise<string[]>} timestamps the LLM confirmed as related
 */
async function matchingTimestamps(promptModel, session, prompt, keywords) {
const header = 'Current date: ' + (new Date).toISOString()
+ '\nOur recent topics:\n'
let context = header
let matches = []
const conversationKeys = Object.keys(ACTIVE_CONVERSATIONS)
.filter(key => key.match('-' + DEFAULT_MODEL + '-' + session + '.json'))
for (const conversationKey of conversationKeys) {
const conversation = ACTIVE_CONVERSATIONS[conversationKey]
// Every key except the bookkeeping entries is an epoch-ms timestamp.
const timestamps = Object.keys(conversation)
.filter(k => k != 'summaries' && k != 'memories')
timestamps.sort((a, b) => b - a) // newest first
for (const stamp of timestamps) {
const entry = conversation[stamp]
const topics = keywords.filter(kw => entry.keywords.match(kw))
// Skip entries that neither appear in the prompt nor share a keyword.
if (!prompt.match(stamp) && topics.length == 0) {
continue
}
context += new Date(parseInt(stamp)).toISOString()
+ ' - ' + topics.join(', ')
+ (entry.summary ? (' - ' + entry.summary) : '')
+ '\n'
// Flush a full batch through the LLM and start a fresh context block.
if (context.length > 2048) {
const confirmed = await askLlamaMatchTimestamps(promptModel, context, keywords, prompt, timestamps)
matches = matches.concat(confirmed)
context = header
}
}
// Flush whatever accumulated for this conversation beyond the bare header.
// NOTE(review): context is intentionally NOT reset here, mirroring the
// original — leftover lines carry into the next conversation's batch.
if (context.length > header.length) {
const confirmed = await askLlamaMatchTimestamps(promptModel, context, keywords, prompt, timestamps)
matches = matches.concat(confirmed)
}
}
return matches
}
module.exports = matchingTimestamps
```javascript
const { ACTIVE_CONVERSATIONS, PROJECT_PATH, DEFAULT_MODEL } = require('./general-chit-chat');
/**
 * Filter candidate timestamps down to those mentioned in an AI response.
 * Matches either the raw epoch-millisecond string or its ISO-8601 form
 * (restoring the ISO matching the first implementation performed), using
 * plain substring checks — no regex construction, so no escaping is needed.
 * @param {string} promptModel - AI model response text to search.
 * @param {string[]} timestamps - Candidate epoch-millisecond timestamp strings.
 * @returns {Promise<string[]>} Timestamps referenced in the response, in input order.
 */
async function filterTimestamps(promptModel, timestamps) {
  return timestamps.filter(time =>
    promptModel.includes(time)
    || promptModel.includes(new Date(parseInt(time)).toISOString()));
}
/**
 * Build the timestamp-matching prompt, send it to the model, and return
 * only the candidate timestamps the model's reply actually references.
 * @param {function(string): Promise<string>} promptModel - Async function that queries the LLM.
 * @param {string} messages - Accumulated context/summaries block.
 * @param {string[]} keywords - Keywords listed at the top of the prompt.
 * @param {string} prompt - User prompt appended after the context.
 * @param {string[]} timestamps - Candidate timestamps to confirm.
 * @returns {Promise<string[]>} Matching timestamps.
 */
async function askLlamaMatchTimestamps(promptModel, messages, keywords, prompt, timestamps) {
  const keywordList = keywords.join('\n');
  const question = `Given the following keywords:\n${keywordList}\n${messages}${prompt}\nOnly respond with related and unique timestamps, no explanations.`;
  console.log('User:', question);
  const answer = await promptModel(question);
  console.log('AI:', answer);
  // Delegate the actual extraction to the shared filter helper.
  return await filterTimestamps(answer, timestamps);
}
/**
 * Matching timestamps and return.
 * Scans the session's cached conversations, accumulates keyword-matching
 * entries into a context block, and asks the LLM to confirm relevance in
 * batches whenever the block exceeds 2048 characters.
 * @param {function(string): Promise<string>} promptModel - AI model prompt function.
 * @param {string} session - Session ID.
 * @param {string} prompt - Prompt to be matched.
 * @param {string[]} keywords - Keywords to be matched.
 * @returns {Promise<string[]>} Matching timestamps.
 */
async function matchingTimestamps(promptModel, session, prompt, keywords) {
  // BUG FIX: this was `const` but is reassigned via concat below — the
  // original threw "Assignment to constant variable" on the first batch.
  let matchingTimestamps = [];
  const header = `Current date: ${new Date().toISOString()}\nOur recent topics:\n`;
  // BUG FIX: the context must accumulate across iterations; the original
  // rebuilt it from scratch every loop pass and pushed unconfirmed
  // timestamps directly, bypassing the LLM confirmation step.
  let messages = header;
  // Get recent conversations for this session/model.
  const loadedConversations = Object.keys(ACTIVE_CONVERSATIONS)
    .filter(key => key.match(`-${DEFAULT_MODEL}-${session}.json`));
  for (const conversation of loadedConversations) {
    // Every key except the bookkeeping entries is an epoch-ms timestamp.
    const timestamps = Object.keys(ACTIVE_CONVERSATIONS[conversation])
      .filter(k => k !== 'summaries' && k !== 'memories');
    timestamps.sort((a, b) => b - a); // newest first
    for (const timestamp of timestamps) {
      const message = ACTIVE_CONVERSATIONS[conversation][timestamp];
      const topics = keywords.filter(key => message.keywords.match(key));
      // Skip entries that neither appear in the prompt nor share a keyword.
      if (!prompt.match(timestamp) && topics.length === 0) {
        continue;
      }
      messages += `${new Date(parseInt(timestamp)).toISOString()} - ${topics.join(', ')}${message.summary ? ` - ${message.summary}` : ''}\n`;
      // Flush a full batch through the LLM and reset the context window.
      if (messages.length > 2048) {
        const confirmed = await askLlamaMatchTimestamps(promptModel, messages, keywords, prompt, timestamps);
        matchingTimestamps = matchingTimestamps.concat(confirmed);
        messages = header;
      }
    }
    // Flush whatever accumulated for this conversation beyond the header.
    if (messages.length > header.length) {
      const confirmed = await askLlamaMatchTimestamps(promptModel, messages, keywords, prompt, timestamps);
      matchingTimestamps = matchingTimestamps.concat(confirmed);
      messages = header;
    }
  }
  // Return matching timestamps.
  return matchingTimestamps;
}
module.exports = matchingTimestamps;
// Helper function to escape special characters in regular expression.
/**
 * Escape regex metacharacters so a string can be embedded in a RegExp
 * pattern as a literal match.
 * @param {string} string - Raw input.
 * @returns {string} Input with all regex specials backslash-escaped.
 */
function escapeRegExp(string) {
  // '$&' re-inserts the matched character after the backslash. The original
  // replacement '\\${OUTPUT}amp;' was an HTML-entity mangling of '\\$&',
  // which produced literal garbage instead of escaping anything.
  return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}
```
Code Breakdown
The code starts by importing constants from an importer
module:
const { ACTIVE_CONVERSATIONS, PROJECT_PATH, DEFAULT_MODEL } = importer.import('general chit chat')
These constants are likely used throughout the codebase and represent:

- ACTIVE_CONVERSATIONS: an object containing active conversations
- PROJECT_PATH: the path to the project directory
- DEFAULT_MODEL: the default model to use

The askLlamaMatchTimestamps function takes several parameters:

- promptModel: a function to generate a response from a prompt
- messages: a string of messages to process
- keywords: an array of keywords to match
- prompt: a prompt to generate a response for
- timestamps: an array of timestamps to match

The function builds a new prompt from the provided keywords and messages, then uses promptModel to generate a response. It filters the timestamps array to keep only the timestamps that are mentioned in the response:

return timestamps
.filter(time => a3.match(time) || a3.match(new Date(parseInt(time)).toISOString()))
The matchingTimestamps function takes several parameters:

- promptModel: a function to generate a response from a prompt
- session: a session ID
- prompt: a prompt to generate a response for
- keywords: an array of keywords to match

The function initializes several variables:

- matchingTimestamps: an array to store matching timestamps
- messages: an initial message string
- originalTimestamp: a copy of the initial message string
- loadedConversations: an array of conversation IDs that match the current session and model

The function then iterates over the loadedConversations array, processing each conversation:

let message = conversation[timestamps[j]]
let topics = keywords.filter(key => message.keywords.match(key))
if (!prompt.match(timestamps[j]) && topics.length == 0) {
continue
}

If the prompt does not match the timestamp and no keywords match, the timestamp is skipped. Otherwise, the timestamp, its matched topics, and any summary are appended to the messages string. Whenever the messages string exceeds 2048 characters, askLlamaMatchTimestamps is called with the current messages string, the returned timestamps are appended to the matchingTimestamps array, and messages is reset to its initial value. Finally, the function returns the matchingTimestamps array.