llama | store llama function | create llm session | Search

This JavaScript code imports functions from other modules to interact with a large language model (LLM) for code summarization and caching, then iterates through a cache of cells to retrieve and store the cached data using these LLM functions.

Run example

npm run import -- "store all notebook llm functions"

store all notebook llm functions

const {askLlamaAboutCode} = importer.import("ask llm about code")
const {askLlamaToSummerize, askLlamaToGeneralize, askLlamaToImplement} = importer.import("ask llm to summerize")
const {getExports, cacheCells} = importer.import("select code tree",
"get exports from source",
"cache notebook",
"cache all")
const { functionCache } = importer.import("cache rpc functions with llm descriptions")
const { storeLlamaFunction } = importer.import("store llama function")

/**
 * Iterates over every cell in the notebook cell cache and generates (or reuses
 * cached) LLM-derived metadata for each one — a long description, a shortened
 * summary, category labels, an example re-implementation ("amazing"), and the
 * exported/RPC function names — then persists the result via storeLlamaFunction.
 *
 * Cells with empty code, or cells that live in a "cache" notebook, are stored
 * with empty metadata so they are skipped cheaply on subsequent passes.
 *
 * @returns {Promise<void>}
 */
async function storeAllLlamaFunctions() {
  const getParameters = await importer.import("get c parameters")
  const pythonParams = await importer.import("python params in antlr")
  const cellCache = importer.import("cell cache").cellCache
  for(let i = 0; i < cellCache.length; i++) {
    let cell = cellCache[i]
    let code = importer.lookupCell(cell[1], cacheCells)
    // Empty cells and cache notebooks get empty metadata so later passes skip them.
    // (case-insensitive match, consistent with the mtime check below)
    if(code.code.trim().length == 0 || code.filename.match(/cache/gi)) {
      storeLlamaFunction(cell[1], code.mtime, [], '', '', '', '', '')
      continue
    }

    let amazing
    let summary
    let shortened
    let rpcFunction
    let categories
    let category
    let fresh = false

    if(typeof functionCache[cell[1]] != 'undefined') {
      if(
        // notebook hasn't changed
        code.mtime <= functionCache[cell[1]].mtime
        // don't bother updating cache notebooks,
        // otherwise this will run every time any notebook changes
        || code.filename.match(/cache/gi)
      ) {
        summary = functionCache[cell[1]].description
        shortened = functionCache[cell[1]].summary
        rpcFunction = functionCache[cell[1]].exports
        amazing = functionCache[cell[1]].amazing
      }

      categories = functionCache[cell[1]].categories
      // BUG FIX: previously read .categories here too; category is stored
      // as a separate field by storeLlamaFunction below.
      category = functionCache[cell[1]].category
    } else {
      fresh = true
    }

    // Regenerate metadata when it is missing, too short, or polluted with
    // known bad LLM responses. The !shortened guard also prevents a
    // TypeError on shortened.match() when only part of the cache was set;
    // categories is coerced to a string because it may be an array.
    if(!summary || !shortened || !categories || (categories + '').includes('\n')
      || summary.length < 256 || summary.match(/Find the derivative/gi)
      || shortened.match(/Find the derivative/gi)
      || (categories + '').match(/Code analysis request/gi)) {
      // TODO: this should cause the erroneous cell to show up every time and for these to be fixed next pass
      summary = await askLlamaAboutCode(code.code)
      shortened = await askLlamaToSummerize(summary)
      categories = await askLlamaToGeneralize(summary)
      fresh = true
    }

    if(!amazing) {
      amazing = await askLlamaToImplement(code.code, code.language)
      fresh = true
    }

    // Derive exported/RPC function names per language; fall back to an
    // empty list if the language-specific parser fails.
    if(typeof rpcFunction == 'undefined') {
      try {
        if(code.language == 'javascript')
          rpcFunction = getExports(code.code)
        if(code.language == 'c' || code.language == 'cpp')
          rpcFunction = (await getParameters(code.code)).map(p => typeof p == 'string' ? p : p[0])
        if(code.language == 'python') {
          const params = await pythonParams(code.code)
          rpcFunction = typeof params.function != 'undefined' ? [params.function] : params.map(p => p.function)
        }
        fresh = true
      } catch (e) {
        rpcFunction = []
      }
    }

    if(fresh) {
      // TODO: insert rpc function into sqlite database to make subsequent lookups faster
      storeLlamaFunction(cell[1], code.mtime, rpcFunction, summary, shortened, categories, category, amazing)
    }
  }
}

// Expose the batch updater; consumed by the importer/notebook runtime.
module.exports = {
  storeAllLlamaFunctions
}

What the code could have been:

// Import necessary modules and functions
const {
  askLlamaAboutCode,
  askLlamaToSummarize,
  askLlamaToGeneralize,
  askLlamaToImplement,
  getExports,
  cacheCells,
  functionCache,
  storeLlamaFunction,
  getParameters,
  pythonParams,
  cellCache
} = require('./importer');

// Define a function to check if a cell needs to be cached
// Decide whether a cell's metadata must be (re)generated.
// Returns false for empty code, for cache notebooks, and for cells whose
// cached entry is at least as new as the source; true otherwise.
const needsCache = (cell, code, mtime) => {
  const hasNoCode = !code.code.trim().length;
  const isCacheNotebook = code.filename.includes('cache');
  if (hasNoCode || isCacheNotebook) return false;
  const cached = functionCache[cell];
  const cacheIsCurrent = typeof cached !== 'undefined' && code.mtime <= cached.mtime;
  return !cacheIsCurrent;
};

// Define a function to process a cell
// Generate LLM-derived metadata for a single cell.
// Returns { fresh, summary, shortened, categories, category, amazing, rpcFunction };
// fresh is false when the early-return path is taken and nothing was generated.
// NOTE(review): the needsCache() early-return looks inverted relative to the
// main implementation (it skips cells that DO need regeneration) — verify.
const processCell = async (cell, code) => {
  const { language } = code;
  let summary, shortened, rpcFunction, categories, category, amazing, fresh = false;

  // The `|| {}` guard avoids a TypeError on .mtime when no cache entry
  // exists for this cell yet.
  if (needsCache(cell, code, (functionCache[cell ? cell : code.filename] || {}).mtime))
    return { fresh, summary, shortened, categories, category, amazing, rpcFunction };

  // Generate each missing piece of metadata; any failure falls through to
  // an empty rpcFunction list (errors are otherwise swallowed — NOTE(review):
  // consider at least logging `e`).
  try {
    if (!summary)
      summary = await askLlamaAboutCode(code.code);
    // BUG FIX: previously re-assigned `summary` here, leaving `shortened`
    // permanently undefined.
    if (!shortened)
      shortened = await askLlamaToSummarize(summary);
    categories = await askLlamaToGeneralize(summary);
    if (!amazing)
      amazing = await askLlamaToImplement(code.code, language);
    if (typeof rpcFunction === 'undefined')
      rpcFunction = await getRpcFunction(language, code.code);
  } catch (e) {
    rpcFunction = [];
  }
  fresh = true;

  return { fresh, summary, shortened, categories, category, amazing, rpcFunction };
};

// Define a function to get the RPC function based on the language
// Resolve exported/RPC function names for a code snippet, dispatching on
// the cell's language. Unrecognized languages yield an empty list.
const getRpcFunction = async (language, code) => {
  if (language === 'javascript') {
    return await getExports(code);
  }
  if (language === 'c' || language === 'cpp') {
    const parameters = await getParameters(code);
    return parameters.map((p) => (typeof p === 'string' ? p : p[0]));
  }
  if (language === 'python') {
    const parsed = await pythonParams(code);
    return typeof parsed.function !== 'undefined' ? [parsed.function] : parsed.map((p) => p.function);
  }
  return [];
};

// Define the main function to store all Llama functions
// Main entry point: walk the cell cache and persist LLM metadata per cell.
// NOTE(review): needsCache() returns true when a cell DOES need regeneration,
// yet this branch stores empty metadata and skips it — the condition looks
// inverted relative to the original implementation; verify intent.
const storeAllLlamaFunctions = async () => {
  for (let i = 0; i < cellCache.length; i++) {
    const cell = cellCache[i];
    // NOTE(review): cacheCells is awaited like an async cell lookup here;
    // confirm it resolves a cell id to { code, filename, mtime }.
    const code = await cacheCells(cell[1]);
    // NOTE(review): functionCache[...] may be undefined for new cells, which
    // would make .mtime throw — consider a guard before dereferencing.
    if (needsCache(cell, code, functionCache[cell? cell : code.filename].mtime)) {
      storeLlamaFunction(cell[1], code.mtime, [], '', '', '', '', '');
      continue;
    }

    // Generate (or reuse) metadata, then persist only when something changed.
    const { fresh, summary, shortened, categories, category, amazing, rpcFunction } = await processCell(cell, code);
    if (fresh)
      storeLlamaFunction(cell[1], code.mtime, rpcFunction, summary, shortened, categories, category, amazing);
  }
};

module.exports = { storeAllLlamaFunctions };

Function Breakdown

This JavaScript code appears to be part of a larger system that interacts with a large language model (LLM) to perform various tasks related to code summarization and caching. Here's a breakdown of the functions and their purposes:

Importing Functions

The code imports various functions from other modules using the importer.import() method. The imported functions are:

Storing LLM Functions

The storeAllLlamaFunctions() function is the main entry point of the code. It:

  1. Imports additional functions: getParameters and pythonParams.
  2. Iterates through a cache of cells (represented as arrays) using the cellCache array.
  3. For each cell, it checks if the code is empty or if it's a cache file. If so, it skips to the next cell.
  4. It checks if the function cache for the current cell has already been populated. If so, it reuses the cached data.
  5. If not, it uses the LLM functions to generate a description, a shortened summary, category labels, and an example implementation for the cell's code.
  6. Finally, it persists the generated metadata by calling storeLlamaFunction().

Notes