404
+ +Page not found
+ + +diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..26df910 --- /dev/null +++ b/404.html @@ -0,0 +1,110 @@ + + +
+ + + + +Page not found
+ + +Auto-generate code documentation in Markdown format in seconds.
+Automated documentation of programming source code is a challenging task with significant practical and scientific implications for the developer community. ReadmeReady is a large language model (LLM)-based application that developers can use as a support tool to generate basic documentation for any publicly available or custom repository. Over the last decade, a number of research studies have been conducted on generating documentation for source code using neural network architectures. With the recent advancements in LLM technology, some open-source applications have been developed to address this problem. However, these applications typically rely on the OpenAI APIs, which incur substantial financial costs, particularly for large repositories. Moreover, none of these open-source applications offer a fine-tuned model or features to enable users to fine-tune custom LLMs. Additionally, finding suitable data for fine-tuning is often challenging. Our application addresses these issues.
+ReadmeReady is available only on Linux/Windows.
+Please follow the installation guide here to install python-magic
.
The simplest way to install ReadmeReady and its dependencies is from PyPI with pip, Python's preferred package installer.
+pip install readme_ready
+
+In order to upgrade ReadmeReady to the latest version, use pip as follows.
+$ pip install -U readme_ready
+
+You can also install ReadmeReady from source as follows.
+$ git clone https://github.com/souradipp76/ReadMeReady.git
+$ cd ReadMeReady
+$ make install
+
+To create a virtual environment before installing ReadmeReady, you can use the command:
+$ make virtualenv
+$ source .venv/bin/activate
+
+$ export OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
+$ export HF_TOKEN=<YOUR_HUGGINGFACE_TOKEN>
+
+Set OPENAI_API_KEY=dummy
to use only open-source models.
$ python -m readme_ready
+#or
+$ readme_ready
+
+from readme_ready.query import query
+from readme_ready.index import index
+from readme_ready.types import (
+ AutodocReadmeConfig,
+ AutodocRepoConfig,
+ AutodocUserConfig,
+ LLMModels,
+)
+
+model = LLMModels.LLAMA2_7B_CHAT_GPTQ # Choose model from supported models
+
+repo_config = AutodocRepoConfig (
+ name = "<NAME>", # Replace <NAME>
+ root = "<PROJECT_ROOT>", # Replace <PROJECT_ROOT>
+ repository_url = "<PROJECT_URL>", # Replace <PROJECT_URL>
+ output = "<OUTPUT_DIR>", # Replace <OUTPUT_DIR>
+ llms = [model],
+ peft_model_path = "<PEFT_MODEL_NAME_OR_PATH>", # Replace <PEFT_MODEL_NAME_OR_PATH>
+ ignore = [
+ ".*",
+ "*package-lock.json",
+ "*package.json",
+ "node_modules",
+ "*dist*",
+ "*build*",
+ "*test*",
+ "*.svg",
+ "*.md",
+ "*.mdx",
+ "*.toml"
+ ],
+ file_prompt = "",
+ folder_prompt = "",
+ chat_prompt = "",
+ content_type = "docs",
+ target_audience = "smart developer",
+ link_hosted = True,
+ priority = None,
+ max_concurrent_calls = 50,
+ add_questions = False,
+ device = "auto", # Select device "cpu" or "auto"
+)
+
+user_config = AutodocUserConfig(
+ llms = [model]
+)
+
+readme_config = AutodocReadmeConfig(
+ headings = "Description,Requirements,Installation,Usage,Contributing,License"
+)
+
+index.index(repo_config)
+query.generate_readme(repo_config, user_config, readme_config)
+
+Run the sample script in the examples/example.py
to see a typical code usage.
readme_ready.index.index
+
+
+Index
+ + + + + + + + +index(config)
+
+Indexes a repository to generate documentation and vector store files.
+Processes the repository specified in the config to create JSON files, converts them to Markdown format, +and builds a vector store from the Markdown documents. Creates the necessary directories for JSON, +Markdown, and data outputs as specified in the configuration.
+ + +Parameters: | +
+
|
+
---|
Returns: | +
+
|
+
---|
readme_ready.index.create_vector_store
+
+
+Create Vector Store
+ + + + + + + + +create_vector_store(root, output, ignore, llms, device)
+
+Creates a vector store from Markdown documents.
+Loads documents from the specified root directory, splits the text into chunks, +creates a vector store using the selected LLM model, and saves the vector store +to the output path. Ignores files matching the patterns provided in the ignore list.
+ + +Parameters: | +
+
|
+
---|
Returns: | +
+
|
+
---|
Raises: | +
+
|
+
---|
readme_ready.index.process_repository
+
+
+Process Repository
+ + + + + + + + +process_repository(config, dry_run=False)
+
+Processes a repository to generate documentation files.
+Loads source files from the specified repository root, generates documentation content +for each file, and saves the results as JSON files to the output path. +Ignores files matching the patterns provided in the ignore list.
+ + +Parameters: | +
+
|
+
---|
Returns: | +
+
|
+
---|
Raises: | +
+
|
+
---|
readme_ready.query.query
+
+
+Query
+ + + + + + + + +query(repo_config, user_config)
+
+Queries the repository for information based on user input.
+Initializes a question-answering chain, displays a welcome message, and enters a loop to +prompt the user for questions about the repository. Processes each question by invoking +the QA chain, updates the chat history, and displays the response in Markdown format. The +loop continues until the user types 'exit'.
+ + +Parameters: | +
+
|
+
---|
Returns: | +
+
|
+
---|
Raises: | +
+
|
+
---|
generate_readme(repo_config, user_config, readme_config)
+
+Generates a README file based on repository and user configurations.
+Initializes a README generation chain, clears the terminal, and prepares the output file. +Iterates over the specified headings in the README configuration, generates content for each +section by invoking the chain, and writes the content in Markdown format to the README file. +Handles any RuntimeError that occurs during the process.
+ + +Parameters: | +
+
|
+
---|
Returns: | +
+
|
+
---|
Raises: | +
+
|
+
---|
readme_ready.query.create_chat_chain
+
+
+Create Chat Chain
+ + + + + + + + +make_qa_chain(project_name, repository_url, content_type, chat_prompt, target_audience, vectorstore, llms, device='cpu', on_token_stream=None)
+
+Creates a question-answering (QA) chain for the specified project.
+Initializes and configures the QA chain using the provided repository and user configurations. +Selects the appropriate language model (LLM), sets up the retriever with a history-aware mechanism, +and combines document chains for processing queries. The chain facilitates interaction with the +vector store to retrieve and process relevant information based on user queries.
+ + +Parameters: | +
+
|
+
---|
Returns: | +
+
|
+
---|
Raises: | +
+
|
+
---|
make_readme_chain(project_name, repository_url, content_type, chat_prompt, target_audience, vectorstore, llms, peft_model=None, device='cpu', on_token_stream=None)
+
+Creates a README generation chain for the specified project.
+Initializes and configures the README generation chain using the provided repository, user, and README configurations. +Selects the appropriate language model (LLM), sets up the document processing chain with the specified prompts, +and integrates with the vector store to generate comprehensive README sections based on project data. +The chain facilitates automated generation of README files tailored to the project's specifications.
+ + +Parameters: | +
+
|
+
---|
Returns: | +
+
|
+
---|
Raises: | +
+
|
+
---|
' + escapeHtml(summary) +'
' + noResultsText + '
'); + } +} + +function doSearch () { + var query = document.getElementById('mkdocs-search-query').value; + if (query.length > min_search_length) { + if (!window.Worker) { + displayResults(search(query)); + } else { + searchWorker.postMessage({query: query}); + } + } else { + // Clear results for short queries + displayResults([]); + } +} + +function initSearch () { + var search_input = document.getElementById('mkdocs-search-query'); + if (search_input) { + search_input.addEventListener("keyup", doSearch); + } + var term = getSearchTermFromLocation(); + if (term) { + search_input.value = term; + doSearch(); + } +} + +function onWorkerMessage (e) { + if (e.data.allowSearch) { + initSearch(); + } else if (e.data.results) { + var results = e.data.results; + displayResults(results); + } else if (e.data.config) { + min_search_length = e.data.config.min_search_length-1; + } +} + +if (!window.Worker) { + console.log('Web Worker API not supported'); + // load index in main thread + $.getScript(joinUrl(base_url, "search/worker.js")).done(function () { + console.log('Loaded worker'); + init(); + window.postMessage = function (msg) { + onWorkerMessage({data: msg}); + }; + }).fail(function (jqxhr, settings, exception) { + console.error('Could not load worker.js'); + }); +} else { + // Wrap search in a web worker + var searchWorker = new Worker(joinUrl(base_url, "search/worker.js")); + searchWorker.postMessage({init: true}); + searchWorker.onmessage = onWorkerMessage; +} diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 0000000..d35dca6 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"ReadmeReady Auto-generate code documentation in Markdown format in seconds. What is ReadmeReady? 
Automated documentation of programming source code is a challenging task with significant practical and scientific implications for the developer community. ReadmeReady is a large language model (LLM)-based application that developers can use as a support tool to generate basic documentation for any publicly available or custom repository. Over the last decade, several research have been done on generating documentation for source code using neural network architectures. With the recent advancements in LLM technology, some open-source applications have been developed to address this problem. However, these applications typically rely on the OpenAI APIs, which incur substantial financial costs, particularly for large repositories. Moreover, none of these open-source applications offer a fine-tuned model or features to enable users to fine-tune custom LLMs. Additionally, finding suitable data for fine-tuning is often challenging. Our application addresses these issues. Installation ReadmeReady is available only on Linux/Windows. Dependencies Please follow the installation guide here to install python-magic . Install it from PyPI The simplest way to install ReadmeReady and its dependencies is from PyPI with pip, Python's preferred package installer. pip install readme_ready In order to upgrade ReadmeReady to the latest version, use pip as follows. $ pip install -U readme_ready Install it from source You can also install ReadmeReady from source as follows. $ git clone https://github.com/souradipp76/ReadMeReady.git $ cd ReadMeReady $ make install To create a virtual environment before installing ReadmeReady, you can use the command: $ make virtualenv $ source .venv/bin/activate Usage Initialize $ export OPENAI_API_KEY=