Skip to content

Commit

Permalink
Updated README and added example.
Browse files Browse the repository at this point in the history
  • Loading branch information
souradipp76 committed Nov 17, 2024
1 parent 3ad795b commit 248100b
Show file tree
Hide file tree
Showing 5 changed files with 106 additions and 35 deletions.
8 changes: 2 additions & 6 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ jobs:
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
# fail_ci_if_error: true
# fail_ci_if_error: true

# tests_mac:
# needs: linter
Expand Down Expand Up @@ -91,9 +91,5 @@ jobs:
# run: python.exe -m pip install --upgrade pip
# - name: Install project
# run: pip install -e .[test]
# - name: Set OPENAI API Key
# run: |
# chcp 65001
# echo "OPENAI_API_KEY=dummy" >> $env:GITHUB_ENV
# - name: run tests
# run: pytest -s -vvvv -l --tb=long tests
# run: set OPENAI_API_KEY=dummy & pytest -s -vvvv -l --tb=long tests
3 changes: 2 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,8 @@ lint: ## Run pep8, black, mypy linters.

.PHONY: test
test: lint ## Run tests and generate coverage report.
$(ENV_PREFIX)pytest -v --cov-config .coveragerc --cov=readme_ready -l --tb=short --maxfail=1 tests/
$(ENV_PREFIX)
$(ENV_PREFIX)OPENAI_API_KEY=dummy pytest -v --cov-config .coveragerc --cov=readme_ready -l --tb=short --maxfail=1 tests/
$(ENV_PREFIX)coverage xml
$(ENV_PREFIX)coverage html

Expand Down
72 changes: 45 additions & 27 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,8 @@ $ export OPENAI_API_KEY=<YOUR_OPENAI_API_KEY>
$ export HF_TOKEN=<YOUR_HUGGINGFACE_TOKEN>
```

Set `OPENAI_API_KEY=dummy` to use only open-source models.

### Command-Line

```bash
Expand All @@ -59,17 +61,25 @@ $ readme_ready
### In Code

```py
from readme_ready import query
from readme_ready import index

repo_config = {
"name": <NAME>, # Replace <NAME>
"root": <PROJECT_ROOT>, # Replace <PROJECT_ROOT>
"repository_url": <PROJECT_URL>, # Replace <PROJECT_URL>
"output": <OUTPUT_DIR>, # Replace <OUTPUT_DIR>
"llms": [<MODEL_NAME_OR_PATH>], # Replace <MODEL_NAME_OR_PATH>
"peft_model_path": <PEFT_MODEL_NAME_OR_PATH>, # Replace <PEFT_MODEL_NAME_OR_PATH>
"ignore": [
from readme_ready.query import query
from readme_ready.index import index
from readme_ready.types import (
AutodocReadmeConfig,
AutodocRepoConfig,
AutodocUserConfig,
LLMModels,
)

model = LLMModels.LLAMA2_7B_CHAT_HF # Choose model from supported models

repo_config = AutodocRepoConfig (
name = "<NAME>", # Replace <NAME>
root = "<PROJECT_ROOT>", # Replace <PROJECT_ROOT>
repository_url = "<PROJECT_URL>", # Replace <PROJECT_URL>
output = "<OUTPUT_DIR>", # Replace <OUTPUT_DIR>
llms = [model],
peft_model_path = "<PEFT_MODEL_NAME_OR_PATH>", # Replace <PEFT_MODEL_NAME_OR_PATH>
ignore = [
".*",
"*package-lock.json",
"*package.json",
Expand All @@ -82,24 +92,32 @@ repo_config = {
"*.mdx",
"*.toml"
],
"file_prompt": "",
"folder_prompt": "",
"chat_prompt": "",
"content_type": "docs",
"target_audience": "smart developer",
"link_hosted": True,
"priority": None,
"max_concurrent_calls": 50,
"add_questions": False
}

user_config = {
"llms": [model]
}
index.index(**repo_config)
query.generate_readme(**repo_config, **user_config)
file_prompt = "",
folder_prompt = "",
chat_prompt = "",
content_type = "docs",
target_audience = "smart developer",
link_hosted = True,
priority = None,
max_concurrent_calls = 50,
add_questions = False,
device = "cuda", # Select device "cuda" or "cpu"
)

user_config = AutodocUserConfig(
llms = [model]
)

readme_config = AutodocReadmeConfig(
headings = "# Description, # Requirements, # Installation, # Usage, # Contributing, # License"
)

index.index(repo_config)
query.generate_readme(repo_config, user_config, readme_config)
```

Run the sample script `examples/example.py` to see typical code usage.

### Finetuning

For finetuning on custom datasets, follow the instructions below.
Expand Down
56 changes: 56 additions & 0 deletions examples/example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
"""
Example
"""
from readme_ready.query import query
from readme_ready.index import index
from readme_ready.types import (
AutodocReadmeConfig,
AutodocRepoConfig,
AutodocUserConfig,
LLMModels,
)

model = LLMModels.TINYLLAMA_1p1B_CHAT_GGUF # Choose model from supported models

repo_config = AutodocRepoConfig (
name = "readmy_ready",
root = "./readme_ready",
repository_url = "https://github.com/souradipp76/ReadMeReady",
output = "./output/readmy_ready",
llms = [model],
peft_model_path = None,
ignore = [
".*",
"*package-lock.json",
"*package.json",
"node_modules",
"*dist*",
"*build*",
"*test*",
"*.svg",
"*.md",
"*.mdx",
"*.toml"
],
file_prompt = "",
folder_prompt = "",
chat_prompt = "",
content_type = "docs",
target_audience = "smart developer",
link_hosted = True,
priority = None,
max_concurrent_calls = 50,
add_questions = False,
device = "cpu",
)

user_config = AutodocUserConfig(
llms = [model]
)

readme_config = AutodocReadmeConfig(
headings = "# Description, # Requirements, # Installation, # Usage, # Contributing, # License"
)

index.index(repo_config)
query.generate_readme(repo_config, user_config, readme_config)
2 changes: 1 addition & 1 deletion readme_ready/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def url_validator(x):
).ask()

device = questionary.select(
message="Device?", choices=["cpu", "gpu"], default="cpu"
message="Device?", choices=["cpu", "cuda"], default="cpu"
).ask()

match model_name:
Expand Down

0 comments on commit 248100b

Please sign in to comment.