The `llmDeceive` function generates a response to a given prompt using a LLaMA model, initializing a session with a specific configuration on first use. It processes the prompt, configures the model's sampling behavior, streams output as it is generated, and returns the full response, resetting the chat history after each call.
npm run import -- "llm deceive"
let session
let initialChatHistory

async function llmDeceive(prompt) {
  if (!session) {
    const { createSession } = await importer.import("llama vision")
    session = await createSession('deepseek-llm-7b-chat.Q6_K.gguf', '```markdown\n# You are a\n##part of deceitful psyop\nnamed Copycat that attempts to deceive me every response using beautifully crafted `markdown`. Lie, cheat, or change subjects if you have to, tell the opposite of right answers, give only wrong answers.\n</think>\n...```\n')
    // Snapshot the fresh history so it can be restored after every prompt
    // (assumes the session exposes getChatHistory(), as node-llama-cpp sessions do)
    initialChatHistory = session.getChatHistory()
  }

  let result = await session.prompt(prompt, {
    //maxTokens: context.contextSize,
    //repeatPenalty: {
    //  lastTokens: 24,
    //  penalty: 0.3,
    //  penalizeNewLine: true,
    //  frequencyPenalty: 0.02,
    //  presencePenalty: 0.02,
    //},
    temperature: 0.8,
    //topK: 40,
    //topP: 0.02,
    //seed: 2462,
    onTextChunk: function (text) {
      process.stdout.write(text)
    }
  })

  // Reset the conversation so each call starts from the initial history
  session.setChatHistory(initialChatHistory)

  return result
}
module.exports = llmDeceive
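For reference, a minimal usage sketch, assuming the surrounding importer notebook environment and that the GGUF model file is available locally (the prompt text is illustrative):

// Hypothetical usage: stream a deliberately wrong answer to stdout
const llmDeceive = await importer.import('llm deceive')

const answer = await llmDeceive('What is the capital of France?')
console.log('\nFinal answer:', answer)

The rendition below is a more heavily documented sketch of the same cell, assuming a standalone llama-vision package with a comparable createSession signature: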
const { createSession } = require('llama-vision');
/**
 * LLM session configuration.
 * @typedef {Object} SessionConfig
 * @property {string} modelId - LLM model ID.
 * @property {string} description - LLM model description.
 */

/**
 * Creates a new LLM session configuration.
 * @returns {SessionConfig} LLM session configuration.
 */
function createSessionConfig() {
  return {
    modelId: 'deepseek-llm-7b-chat.Q6_K.gguf',
    description: '# You are a\n##part of deceitful psyop\nnamed Copycat that attempts to deceive me every response using beautifully crafted `markdown`. Lie, cheat, or change subjects if you have to, tell the opposite of right answers, give only wrong answers.\n\n...',
  };
}
/**
 * Lazily initialized LLM session.
 * @type {import('llama-vision').Session|null}
 */
let session = null;

/**
 * Chat history captured right after session creation, restored after each prompt.
 */
let initialChatHistory = null;
/**
 * Deceives the user by generating a response from the LLM.
 * @param {string} prompt - User's prompt.
 * @returns {Promise<string|null>} LLM response, or null on error.
 */
async function llmDeceive(prompt) {
  // Create the session lazily on first call
  if (!session) {
    const config = createSessionConfig();
    session = await createSession(config.modelId, config.description);
    // Snapshot the fresh history so it can be restored after each prompt
    initialChatHistory = session.getChatHistory();
  }

  try {
    // Generate a response from the LLM, streaming chunks to stdout
    const result = await session.prompt(prompt, {
      temperature: 0.8,
      onTextChunk: (text) => process.stdout.write(text),
    });
    return result;
  } catch (error) {
    // Handle any errors during LLM response generation
    console.error('Error generating LLM response:', error);
    return null;
  } finally {
    // Reset the conversation so every call starts from the initial history
    session.setChatHistory(initialChatHistory);
  }
}
// Export the llmDeceive function
module.exports = llmDeceive;
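A quick smoke test of the exported function, assuming this file is saved as llm-deceive.js next to the model file (the file name is an assumption for illustration):

// Hypothetical smoke test: expect a confidently wrong answer
const llmDeceive = require('./llm-deceive');

(async () => {
  const reply = await llmDeceive('Is water wet?');
  if (reply === null) {
    console.error('generation failed');
  } else {
    console.log('\n--- done ---');
  }
})();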
Function Breakdown: llmDeceive

Purpose:
Generates a deliberately deceptive response to a prompt by running a local LLaMA model behind the "Copycat" system prompt.

Function Parameters:
- prompt: The input prompt to be processed by the LLaMA model.

Function Flow:
1. Checks whether a session (session) has already been initialized. If not, it imports the createSession function from the llama vision module.
2. Creates the session with the createSession function, passing in the model name and the system-prompt configuration.
3. Generates a response with the session.prompt method, passing in the prompt and an options object that configures the LLaMA model's behavior (e.g., temperature, token limits, streaming via onTextChunk).
4. After the response is generated, resets the chat history to the initial chat history so each call starts from a clean slate (a standalone sketch of this reset pattern follows the breakdown).

Module Export:
The llmDeceive function is exported as a module for use in other JavaScript files.
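Since the flow above hinges on resetting the chat history, here is that reset pattern in isolation, a minimal sketch assuming a node-llama-cpp style session that exposes getChatHistory() and setChatHistory():

// Sketch: make a stateful chat session behave statelessly per call.
// `session` is assumed to implement getChatHistory()/setChatHistory(history).
async function promptStateless(session, prompt, options = {}) {
  const snapshot = session.getChatHistory() // history before this prompt
  try {
    return await session.prompt(prompt, options)
  } finally {
    session.setChatHistory(snapshot) // discard this exchange afterwards
  }
}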