diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4a83bca..f8011bf 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -54,7 +54,7 @@ jobs: uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} - # fail_ci_if_error: true + # fail_ci_if_error: true # tests_mac: # needs: linter @@ -91,9 +91,5 @@ jobs: # run: python.exe -m pip install --upgrade pip # - name: Install project # run: pip install -e .[test] - # - name: Set OPENAI API Key - # run: | - # chcp 65001 - # echo "OPENAI_API_KEY=dummy" >> $env:GITHUB_ENV # - name: run tests - # run: pytest -s -vvvv -l --tb=long tests + # run: set OPENAI_API_KEY=dummy & pytest -s -vvvv -l --tb=long tests diff --git a/Makefile b/Makefile index 28bcd85..bea5b68 100644 --- a/Makefile +++ b/Makefile @@ -39,7 +39,7 @@ lint: ## Run pep8, black, mypy linters. .PHONY: test test: lint ## Run tests and generate coverage report. - $(ENV_PREFIX)pytest -v --cov-config .coveragerc --cov=readme_ready -l --tb=short --maxfail=1 tests/ + OPENAI_API_KEY=dummy $(ENV_PREFIX)pytest -v --cov-config .coveragerc --cov=readme_ready -l --tb=short --maxfail=1 tests/ $(ENV_PREFIX)coverage xml $(ENV_PREFIX)coverage html diff --git a/README.md b/README.md index b68cd4d..638e508 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,8 @@ $ export OPENAI_API_KEY= $ export HF_TOKEN= ``` +Set `OPENAI_API_KEY=dummy` to use only open-source models. 
+ ### Command-Line ```bash @@ -59,17 +61,25 @@ $ readme_ready ### In Code ```py -from readme_ready import query -from readme_ready import index - -repo_config = { - "name": , # Replace - "root": , # Replace - "repository_url": , # Replace - "output": , # Replace - "llms": [], # Replace - "peft_model_path": , # Replace - "ignore": [ +from readme_ready.query import query +from readme_ready.index import index +from readme_ready.types import ( + AutodocReadmeConfig, + AutodocRepoConfig, + AutodocUserConfig, + LLMModels, +) + +model = LLMModels.LLAMA2_7B_CHAT_HF # Choose model from supported models + +repo_config = AutodocRepoConfig ( + name = "", # Replace + root = "", # Replace + repository_url = "", # Replace + output = "", # Replace + llms = [model], + peft_model_path = "", # Replace + ignore = [ ".*", "*package-lock.json", "*package.json", @@ -82,24 +92,32 @@ repo_config = { "*.mdx", "*.toml" ], - "file_prompt": "", - "folder_prompt": "", - "chat_prompt": "", - "content_type": "docs", - "target_audience": "smart developer", - "link_hosted": True, - "priority": None, - "max_concurrent_calls": 50, - "add_questions": False -} - -user_config = { - "llms": [model] -} -index.index(**repo_config) -query.generate_readme(**repo_config, **user_config) + file_prompt = "", + folder_prompt = "", + chat_prompt = "", + content_type = "docs", + target_audience = "smart developer", + link_hosted = True, + priority = None, + max_concurrent_calls = 50, + add_questions = False, + device = "cuda", # Select device "cuda" or "cpu" +) + +user_config = AutodocUserConfig( + llms = [model] +) + +readme_config = AutodocReadmeConfig( + headings = "# Description, # Requirements, # Installation, # Usage, # Contributing, # License" +) + +index.index(repo_config) +query.generate_readme(repo_config, user_config, readme_config) ``` +Run the sample script in the `examples/example.py` to see a typical code usage. + ### Finetuning For finetuning on custom datasets, follow the instructions below. 
diff --git a/examples/example.py b/examples/example.py new file mode 100644 index 0000000..333d1be --- /dev/null +++ b/examples/example.py @@ -0,0 +1,56 @@ +""" +Example +""" +from readme_ready.query import query +from readme_ready.index import index +from readme_ready.types import ( + AutodocReadmeConfig, + AutodocRepoConfig, + AutodocUserConfig, + LLMModels, ) + +model = LLMModels.TINYLLAMA_1p1B_CHAT_GGUF # Choose model from supported models + +repo_config = AutodocRepoConfig ( + name = "readme_ready", + root = "./readme_ready", + repository_url = "https://github.com/souradipp76/ReadMeReady", + output = "./output/readme_ready", + llms = [model], + peft_model_path = None, + ignore = [ + ".*", + "*package-lock.json", + "*package.json", + "node_modules", + "*dist*", + "*build*", + "*test*", + "*.svg", + "*.md", + "*.mdx", + "*.toml" + ], + file_prompt = "", + folder_prompt = "", + chat_prompt = "", + content_type = "docs", + target_audience = "smart developer", + link_hosted = True, + priority = None, + max_concurrent_calls = 50, + add_questions = False, + device = "cpu", +) + +user_config = AutodocUserConfig( + llms = [model] ) + +readme_config = AutodocReadmeConfig( + headings = "# Description, # Requirements, # Installation, # Usage, # Contributing, # License" ) + +index.index(repo_config) +query.generate_readme(repo_config, user_config, readme_config) \ No newline at end of file diff --git a/readme_ready/main.py b/readme_ready/main.py index 655df03..311597c 100644 --- a/readme_ready/main.py +++ b/readme_ready/main.py @@ -87,7 +87,7 @@ def url_validator(x): ).ask() device = questionary.select( - message="Device?", choices=["cpu", "gpu"], default="cpu" + message="Device?", choices=["cpu", "cuda"], default="cpu" ).ask() match model_name: