The llmVoice function generates text-to-speech output using the LLaMA model, taking a prompt and an optional session object as parameters. It sends the prompt to the model, streams the generated text to standard output, and returns the result, resetting the chat history afterwards if the provided session is the module's shared session.
async function llmVoice(prompt, session2) {
  // Create a default session when none is provided
  if(!session2) {
    const {createSession} = await importer.import("llama vision")
    session2 = await createSession('OuteTTS-0.3-1B-Q8_0.gguf', 'you are an llm that responds with medium quality text to voice\n')
  }
  // Stream the response to stdout as it is generated
  let result = await session2.prompt(prompt, {
    //maxTokens: context.contextSize,
    temperature: 0.1,
    repetition_penalty: 1.1,
    onTextChunk: function (text) {
      process.stdout.write(text)
    }
  })
  // `session` and `initialChatHistory` are module-level state defined
  // alongside this function; reset the shared session after each prompt
  if(session == session2)
    session2.setChatHistory(initialChatHistory)
  return result
}
module.exports = llmVoice
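A minimal usage sketch, assuming this module is saved as llm-voice.js (a hypothetical path) and that the OuteTTS GGUF model named above is available to the session loader:

const llmVoice = require('./llm-voice')  // hypothetical path; adjust as needed

async function main() {
  // With no session argument, llmVoice creates a default session,
  // streams each text chunk to stdout, and resolves with the full result
  const result = await llmVoice('Hello, this is a text to speech test.')
  console.log('\nGenerated ' + result.length + ' characters')
}

main().catch(console.error)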
The same function, documented and with error handling added:
/**
 * Generate voice output from a given prompt using LLaMA.
 *
 * @param {string} prompt The input text to generate voice output for.
 * @param {object} [session2] Optional session object; a new session is created if omitted.
 * @returns {Promise<string>} The generated voice output.
 */
async function llmVoice(prompt, session2) {
  // Check if a session object is provided, otherwise create a new one
  if (!session2) {
    const { createSession } = await importer.import('llama vision');
    session2 = await createSession(
      'OuteTTS-0.3-1B-Q8_0.gguf',
      'you are an llm that responds with medium quality text to voice\n'
    );
  }

  // Prompt LLaMA to generate voice output
  try {
    const result = await session2.prompt(prompt, {
      // Temperature near 0 keeps sampling conservative
      temperature: 0.1,
      // Penalize repeated tokens to encourage greater variety
      repetition_penalty: 1.1,
      // Called with each chunk of output text as it streams in
      onTextChunk: (text) => {
        // Write the chunk of text to the standard output
        process.stdout.write(text);
      }
    });

    // Reset the shared session's chat history so successive prompts
    // start from the same initial state
    if (session === session2) {
      session2.setChatHistory(initialChatHistory);
    }

    return result;
  } catch (error) {
    // Log any errors that occur during the prompt, then rethrow
    console.error('Error generating voice output:', error);
    throw error;
  }
}
module.exports = llmVoice;
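When several prompts should share one session, the caller can create it once and pass it in; the reset branch only fires for the module's shared session, so a caller-owned session keeps its accumulated history. A sketch under the same assumptions about the 'llama vision' module:

// Sketch: a caller-owned session reused across prompts. Assumes
// createSession from 'llama vision' returns an object exposing the
// prompt() and setChatHistory() methods used above.
async function speakAll(lines) {
  const { createSession } = await importer.import('llama vision');
  const mySession = await createSession(
    'OuteTTS-0.3-1B-Q8_0.gguf',
    'you are an llm that responds with medium quality text to voice\n'
  );
  for (const line of lines) {
    // History accumulates here because mySession is not the shared session
    await llmVoice(line, mySession);
  }
}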
llmVoice

An asynchronous function that generates text-to-speech output using the LLaMA model.

Parameters:
- prompt: The input text prompt to generate speech from.
- session2: An optional session object. If not provided, a new session is created using the importer.import('llama vision') module.

Behavior:
- The prompt is sent to the LLaMA model using the session2.prompt method with the following parameters:
  - temperature: set to 0.1.
  - repetition_penalty: set to 1.1.
  - onTextChunk: a callback function that writes the generated text to standard output.
- If session2 is the same as the module's current session, the chat history is reset (see the sketch of the assumed shared state below).
- The llmVoice function is exported as a module.
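Both versions reference a module-level session and initialChatHistory that the surrounding notebook is assumed to define; a plausible sketch of that shared state (the getChatHistory accessor is an assumption, not confirmed by the source):

// Hypothetical module-level state assumed by the reset branch above
let session, initialChatHistory;

async function initSharedSession() {
  const { createSession } = await importer.import('llama vision');
  session = await createSession(
    'OuteTTS-0.3-1B-Q8_0.gguf',
    'you are an llm that responds with medium quality text to voice\n'
  );
  // Snapshot the starting history so it can be restored after each prompt;
  // assumes the session exposes a getChatHistory() accessor
  initialChatHistory = session.getChatHistory();
}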