diff --git a/.github/workflows/pr-path-detection.yml b/.github/workflows/pr-path-detection.yml new file mode 100644 index 00000000..9d12c346 --- /dev/null +++ b/.github/workflows/pr-path-detection.yml @@ -0,0 +1,151 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +name: Check Paths and Hyperlinks + +on: + pull_request: + branches: [main] + types: [opened, reopened, ready_for_review, synchronize] + +jobs: + check-the-validity-of-hyperlinks-in-README: + runs-on: ubuntu-latest + steps: + - name: Clean Up Working Directory + run: sudo rm -rf ${{github.workspace}}/* + + - name: Checkout Repo docs + uses: actions/checkout@v4 + + - name: Check the Validity of Hyperlinks + run: | + cd ${{github.workspace}} + fail="FALSE" + url_lines=$(grep -Eo '\]\(http[s]?://[^)]+\)' --include='*.md' -r .|grep -Ev 'linkedin') + if [ -n "$url_lines" ]; then + for url_line in $url_lines; do + url=$(echo "$url_line"|cut -d '(' -f2 | cut -d ')' -f1|sed 's/\.git$//') + path=$(echo "$url_line"|cut -d':' -f1 | cut -d'/' -f2-) + if [[ "https://intel.sharepoint.com/:v:/s/mlconsultingandsupport/EZa7vjON10ZCpMvE7U-SPMwBRXbVHqe1Ybsa-fmnXayNUA?e=f6FPsl" == "$url" || "https://intel.sharepoint.com/:v:/s/mlconsultingandsupport/ESMIcBseFTdIuqkoB7TZy6ABfwR9CkfV49TvTa1X_Jihkg?e=zMH7O7" == "$url" ]]; then + echo "Link "$url" from ${{github.workspace}}/$path need to be verified by a real person." + else + response=$(curl -L -s -o /dev/null -w "%{http_code}" "$url") + if [ "$response" -ne 200 ]; then + echo "**********Validation failed, status code: $response, try again**********" + response_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url") + if [ "$response_retry" -eq 200 ]; then + echo "*****Retry successful*****" + else + urls_line+=("$url_line") + echo "Status code: $response_retry, Link $url validation failed, will retry later." 
+ fi + fi + fi + done + fi + echo "**************Start Retry**************" + for link in "${urls_line[@]}"; do + url=$(echo "$link"|cut -d '(' -f2 | cut -d ')' -f1|sed 's/\.git$//') + path=$(echo "$link"|cut -d':' -f1 | cut -d'/' -f2-) + attempt_num=1 + while [ $attempt_num -le 5 ]; do + do_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url") + if [ "$do_retry" -eq 200 ]; then + echo "$url Retry successful" + break + else + echo "$url Validation failed, retrying..." + ((attempt_num++)) + sleep 10 + fi + done + + if [ $attempt_num -gt 5 ]; then + echo "Invalid link from ${{github.workspace}}/$path: $url status code: $do_retry" + fail="TRUE" + fi + done + + if [[ "$fail" == "TRUE" ]]; then + exit 1 + else + echo "All hyperlinks are valid." + fi + shell: bash + + check-the-validity-of-relative-path: + runs-on: ubuntu-latest + steps: + - name: Clean up Working Directory + run: sudo rm -rf ${{github.workspace}}/* + + - name: Checkout Repo docs + uses: actions/checkout@v4 + + - name: Checking Relative Path Validity + run: | + cd ${{github.workspace}} + fail="FALSE" + repo_name=${{ github.event.pull_request.head.repo.full_name }} + if [ "$(echo "$repo_name"|cut -d'/' -f1)" != "opea-project" ]; then + owner=$(echo "${{ github.event.pull_request.head.repo.full_name }}" |cut -d'/' -f1) + branch="https://github.com/$owner/docs/tree/${{ github.event.pull_request.head.ref }}" + else + branch="https://github.com/opea-project/docs/blob/${{ github.event.pull_request.head.ref }}" + fi + link_head="https://github.com/opea-project/docs/blob/main" + OLDIFS=$IFS; IFS=$'\n' + png_lines=$(grep -Eo '\]\([^)]+\)' --include='*.md' -r .|grep -Ev 'http'|grep -Ev 'mailto') + if [ -n "$png_lines" ]; then + for png_line in $png_lines; do + refer_path=$(echo "$png_line"|cut -d':' -f1 | cut -d'/' -f2-) + png_path=$(echo "$png_line"|cut -d '(' -f2 | cut -d ')' -f1) + if [[ "${png_path:0:1}" == "/" ]]; then + check_path=${{github.workspace}}$png_path + elif [[ "${png_path:0:1}" == "#" ]]; then + 
check_path=${{github.workspace}}/$refer_path$png_path + else + check_path=${{github.workspace}}/$(dirname "$refer_path")/$png_path + fi + real_path=$(realpath $check_path) + if [ $? -ne 0 ]; then + echo "Path $png_path in file ${{github.workspace}}/$refer_path does not exist" + fail="TRUE" + else + url=$link_head$(echo "$real_path" | sed 's|.*/docs||') + response=$(curl -I -L -s -o /dev/null -w "%{http_code}" "$url") + if [ "$response" -ne 200 ]; then + echo "**********Validation failed, status code: $response try again**********" + response_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url") + if [ "$response_retry" -eq 200 ]; then + echo "*****Retry successful*****" + else + echo "Retry failed. Check branch ${{ github.event.pull_request.head.ref }}" + url_dev=$branch$(echo "$real_path" | sed 's|.*/docs||') + response=$(curl -I -L -s -o /dev/null -w "%{http_code}" "$url_dev") + if [ "$response" -ne 200 ]; then + echo "**********Validation failed, status code: $response_retry, try again**********" + response_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url_dev") + if [ "$response_retry" -eq 200 ]; then + echo "*****Retry successful*****" + else + echo "Invalid path from ${{github.workspace}}/$refer_path: $png_path status code: $response_retry" + echo "$png_line" + fail="TRUE" + fi + else + echo "Check branch ${{ github.event.pull_request.head.ref }} successfully." + fi + fi + fi + fi + done + fi + IFS=$OLDIFS + if [[ "$fail" == "TRUE" ]]; then + exit 1 + else + echo "All relative links valid." + fi + shell: bash diff --git a/community/CONTRIBUTING.md b/community/CONTRIBUTING.md index 7da855f3..37e230ae 100644 --- a/community/CONTRIBUTING.md +++ b/community/CONTRIBUTING.md @@ -370,7 +370,7 @@ The OPEA projects use GitHub Action for CI test. - End to End Test, the PR must pass all end to end tests. #### Pull Request Review -You can add reviewers from [the code owners list](../codeowner.md) to your PR. 
+You can add reviewers from [the code owners list](./codeowner.md) to your PR. ## Support diff --git a/community/rfcs/24-05-16-OPEA-001-Overall-Design.md b/community/rfcs/24-05-16-OPEA-001-Overall-Design.md index 7dff6306..f227a2fa 100644 --- a/community/rfcs/24-05-16-OPEA-001-Overall-Design.md +++ b/community/rfcs/24-05-16-OPEA-001-Overall-Design.md @@ -38,7 +38,7 @@ This RFC is used to present the OPEA overall design philosophy, including overal The proposed overall architecture is -![OPEA Architecture](opea_architecture.png "OPEA Architecture") +![OPEA Architecture](opea_architecture.png) 1. GenAIComps @@ -58,7 +58,7 @@ The proposed overall architecture is The proposed OPEA workflow is -![OPEA Workflow](opea_workflow.png "OPEA Workflow") +![OPEA Workflow](opea_workflow.png) 1. Microservice diff --git a/developer-guides/doc_guidelines.rst b/developer-guides/doc_guidelines.rst index dcaf0ff1..1aab7838 100644 --- a/developer-guides/doc_guidelines.rst +++ b/developer-guides/doc_guidelines.rst @@ -3,20 +3,20 @@ Documentation Guidelines ######################## -OPEA Project content is written using the `markdown`_ (``.md``) with `MyST extensions`_ and `reStructuredText`_ markup -language (``.rst``) with `Sphinx extensions`_, and processed +OPEA Project content is written using the `markdown`_ (``.md``) with `MyST extensions`_ +and `reStructuredText`_ markup language (``.rst``) with `Sphinx extensions`_, and processed using `Sphinx`_ to create a formatted stand-alone website. Developers can -view this content either in its raw form as ``.md`` and ``.rst`` markup files, or (with -Sphinx installed) they can build the documentation using the Makefile -(on Linux systems) to generate the HTML content. The HTML content can then be -viewed using a web browser. These ``.md`` and ``.rst`` files are maintained in -the project's GitHub repos and processed to create the -`OPEA Project documentation`_ website. 
+view this content either in its raw form as ``.md`` and ``.rst`` markup files, or +build the documentation locally following the :ref:`opea_doc_generation` instructions. +The HTML content can then be viewed using a web browser. These ``.md`` and +``.rst`` files are maintained in the project's GitHub repos and processed to +create the `OPEA Project documentation`_ website. .. note:: While GitHub supports viewing `.md` and `.rst` content with your browser on the `github.com` site, markdown and reST extensions are not recognized there, so the best viewing experience is through the `OPEA Project documentation`_ github.io - website. + website. The github.io site also provides navigation and searching that makes + it easier to find and read what you're looking for. You can read details about `reStructuredText`_ and `Sphinx extensions`_, and `markdown`_ and `MyST extensions`_ from their respective websites. @@ -31,14 +31,15 @@ You can read details about `reStructuredText`_ and `Sphinx extensions`_, and This document provides a quick reference for commonly used markdown and reST with MyST and Sphinx-defined directives and roles used to create the documentation -you're reading. +you're reading. It also provides best-known-methods for working with a mixture +of reStructuredText and markdown. Markdown vs. RestructuredText ***************************** Both markdown and ReStructureText (reST) let you create individual documentation files that GitHub can render when viewing them in your browser on github.com. Markdown is -popular because of it's familarity with developers and is the default markup +popular because of it's familiarity with developers and is the default markup language for StackOverflow, Reddit, GitHub, and others. ReStructuredText came from the Python community in 2001 and became noticed outside that community with the release of Sphinx in 2008. These days, reST is supported by GitHub @@ -47,7 +48,7 @@ OpenCV and LLVM/Clang. 
ReStructuredText is more fully-featured, much more standardized and uniform, and has built-in support for extensions. The markdown language has no standard way -to implement complete documentation systems and doesnt have a standard extension +to implement complete documentation systems and doesn't have a standard extension mechanism, which leads to many different "flavors" of markdown. If you stick to the core and common markdown syntax (headings, paragraphs, lists, and such), using markdown is just fine. However, slipping in raw HTML to do formatting @@ -59,15 +60,15 @@ markdown content within the OPEA project. Within the OPEA documentation, we use both markdown and reST files for the documentation "leaves". We rely on reST for the documentation organization trunk and -branches, through the use of the reST toctree directives. +branches, through the use of the reST ``toctree`` directives. Documentation Organization ************************** Documentation is maintained and updated the same as the project's code within -the opea-project GitHub repos. There are many ``README.md`` files within the various +the opea-project GitHub repos. There are many ``README.md`` and other markdown files within the various repos along with the other files for those components. This is good because it -keeps the relevent documentation and code for that component together. +keeps the relevant documentation and code for that component together. We use the ``docs`` repo to organize the presentation of all these ``README.md`` files, along with other project related documents that are maintained in the @@ -79,6 +80,9 @@ directive to point to other documents that may include additional ``toctree`` directives of their own, ultimately collecting all the content into an organizational structure you can navigate. +Ultimately every document file (``.md`` and ``.rst``) in the project must appear +in the ``toctree`` hierarchy. 
An orphan document file will be flagged by Sphinx +as not included in a toctree directive. Headings ******** @@ -156,7 +160,8 @@ Headings intervening H2 ``##``. You may skip heading levels on the way back up, for example, from an H4 ``####`` back up to an H2 ``##`` as appropriate. - + Sphinx will complain if it finds multiple H1 headings or if you skip a + heading level. Content Highlighting @@ -180,12 +185,12 @@ Some common reST and markdown inline markup samples: .. group-tab:: markdown - * one back quote: ```text``` for `inline code` samples. + * one back quote: ```text``` for ``inline code`` samples. For inline markup, the characters between the beginning and ending characters must not start or end with a space, so ``*this is italics*``, (*this is italics*) while ``* this isn't*`` -(* this isn't*). +(* this isn't*) because of that extra space after the first asterisk. If an asterisk or back quote appears in running text and could be confused with inline markup delimiters, you can eliminate the confusion by adding a @@ -203,16 +208,46 @@ should be indented at the same level as the preceding paragraph (and not indented itself). For numbered lists -start with a ``1.`` or ``a)`` for example, and continue with autonumbering by -using a ``#`` sign and a ``.`` or ``)`` as used in the first list item. +start with a ``1.`` or ``a.`` for example, and continue with autonumbering by +using a ``#`` sign and a ``.`` as used in the first list item. Indent continuation lines with spaces to align with the text of first list item: +It's important to maintain the indentation of content under a list so in the +generated HTML, the content looks like it's part of that list and not a new +paragraph outside of that list. + +For example, compare this: + +----- + +* Here's a bullet list item + +Here's a paragraph that should be part of that first bullet list item's content. 
+ +* Here's a second bullet list item + +----- + +Notice how that middle paragraph is out-dented from the bullet list compared +with this next example where it's not (yes, it's subtle): + +----- + +* Here's a bullet list item + + Here's a paragraph that does look like it's part of that first bullet list item's content because it's indented in the source. + +* Here's a second bullet list item + +----- + + .. code-block:: rest * This is a bulleted list. * It has two items, the second - item and has more than one line of reST text. Additional lines + item and has more than one line of text. Additional lines are indented to the first character of the text of the bullet list. @@ -220,9 +255,9 @@ list item: it would be a continuation of the previous list (or paragraph). #. It has two items too. - a) This is a numbered list using alphabetic list headings - #) It has three items (and uses autonumbering for the rest of the list) - #) Here's the third item. Use consistent punctuation on the list + a. This is a numbered list using alphabetic list headings + #. It has three items (and uses autonumbering for the rest of the list) + #. Here's the third item. Use consistent punctuation on the list number. #. This is an autonumbered list (default is to use numbers starting @@ -234,74 +269,87 @@ list item: #. And a second item back in the containing list. No blank line needed, but it wouldn't hurt for readability. -Definition lists (with one or more terms and their definition) are a -convenient way to document a word or phrase with an explanation. For example, -this reST content: +.. tabs:: -.. code-block:: rest + .. group-tab:: reST - The Makefile has targets that include: + Definition lists (with one or more terms and their definition) are a + convenient way to document a word or phrase with an explanation. For example, + this reST content: - ``html`` - Build the HTML output for the project + .. 
code-block:: rest + + The Makefile has targets that include: + + ``html`` + Build the HTML output for the project - ``clean`` - Remove all generated output, restoring the folders to a - clean state. + ``clean`` + Remove all generated output, restoring the folders to a + clean state. -Would be rendered as: + Would be rendered as: - The Makefile has targets that include: + The Makefile has targets that include: - html - Build the HTML output for the project + html + Build the HTML output for the project + + clean + Remove all generated output, restoring the folders to a + clean state. + + .. group-tab:: markdown - clean - Remove all generated output, restoring the folders to a - clean state. + Definition lists aren't directly supported by markdown. Multi-Column Lists ****************** -In reST, if you have a long bullet list of items, where each item is short, you can -indicate that the list items should be rendered in multiple columns with a -special ``.. rst-class:: rst-columns`` directive. The directive will apply to -the next non-comment element (for example, paragraph) or to content indented under -the directive. For example, this unordered list:: - - .. rst-class:: rst-columns - - * A list of - * short items - * that should be - * displayed - * horizontally - * so it doesn't - * use up so much - * space on - * the page - -would be rendered as: - -.. rst-class:: rst-columns - - * A list of - * short items - * that should be - * displayed - * horizontally - * so it doesn't - * use up so much - * space on - * the page - -A maximum of three columns will be displayed if you use ``rst-columns`` -(or ``rst-columns3``), and two columns for ``rst-columns2``. The number -of columns displayed can be reduced based on the available width of the -display window, reducing to one column on narrow (phone) screens if necessary. - -.. note:: We've deprecated use of the ``hlist`` directive because it - misbehaves on smaller screens. +.. tabs:: + + .. 
group-tab:: reST + + In reST, if you have a long bullet list of items, where each item is short, you can + indicate that the list items should be rendered in multiple columns with a + special ``.. rst-class:: rst-columns`` directive. The directive will apply to + the next non-comment element (for example, paragraph) or to content indented under + the directive. For example, this unordered list:: + + .. rst-class:: rst-columns + + * A list of + * short items + * that should be + * displayed + * horizontally + * so it doesn't + * use up so much + * space on + * the page + + would be rendered as: + + .. rst-class:: rst-columns + + * A list of + * short items + * that should be + * displayed + * horizontally + * so it doesn't + * use up so much + * space on + * the page + + A maximum of three columns will be displayed if you use ``rst-columns`` + (or ``rst-columns3``), and two columns for ``rst-columns2``. The number + of columns displayed can be reduced based on the available width of the + display window, reducing to one column on narrow (phone) screens if necessary. + + .. group-tab:: markdown + + Multi-column lists aren't directly supported by markdown. Tables ****** @@ -311,7 +359,8 @@ There are a few ways to create tables, each with their limitations or quirks. `_ offer the most capability for defining merged rows and columns (where content spans multiple rows or columns, but are hard to maintain because the grid -characters must be aligned throughout the table:: +characters must be aligned throughout the table. They are supported in both +reST and markdown:: +------------------------+------------+----------+----------+ | Header row, column 1 | Header 2 | Header 3 | Header 4 | @@ -341,46 +390,69 @@ This example would render as: | body row 4 | ... | ... | too | +------------------------+------------+----------+----------+ -For reST, `List tables -`_ -are much easier to maintain, but don't support row or column spans:: +.. tabs:: + + .. group-tab:: reST - .. 
list-table:: Table title - :widths: 15 20 40 - :header-rows: 1 + For reST, `List tables `_ + are much easier to maintain, but don't support row or column spans:: + + .. list-table:: Table title + :widths: 15 20 40 + :header-rows: 1 + + * - Heading 1 + - Heading 2 + - Heading 3 + * - body row 1, column 1 + - body row 1, column 2 + - body row 1, column 3 + * - body row 2, column 1 + - body row 2, column 2 + - body row 2, column 3 + + This example would render as: + + .. list-table:: Table title + :widths: 15 20 40 + :header-rows: 1 + + * - Heading 1 + - Heading 2 + - Heading 3 + * - body row 1, column 1 + - body row 1, column 2 + - body row 1, column 3 + * - body row 2, column 1 + - body row 2, column 2 + - body row 2, column 3 + + The ``:widths:`` parameter lets you define relative column widths. The + default is equal column widths. If you have a three-column table and you + want the first column to be half as wide as the other two equal-width + columns, you can specify ``:widths: 1 2 2``. If you'd like the browser + to set the column widths automatically based on the column contents, you + can use ``:widths: auto``. - * - Heading 1 - - Heading 2 - - Heading 3 - * - body row 1, column 1 - - body row 1, column 2 - - body row 1, column 3 - * - body row 2, column 1 - - body row 2, column 2 - - body row 2, column 3 + .. group-tab:: markdown -This example would render as: + Markdown also supports a more free-form table syntax where the rigid box + alignment is greatly simplified as explained in + `markdown tables `_. + Use three or more hyphens ``---`` to denote each column's header, and use + pipes ``|`` to separate each column. For compatibility you should also + add a pipe on both ends of the row:: + + | heading 1 | heading 2 | heading 3 | + |---|---|---| + |row 1 column 1 | row 1 column 2 | yes, it's row 1 column 3| + |row 2 col 1 | row 2 column 2 | row 2 col 3 | + + That would be rendered as: + + .. include:: mdtable.txt + :parser: myst_parser.sphinx_ -.. 
list-table:: Table title - :widths: 15 20 40 - :header-rows: 1 - - * - Heading 1 - - Heading 2 - - Heading 3 - * - body row 1, column 1 - - body row 1, column 2 - - body row 1, column 3 - * - body row 2, column 1 - - body row 2, column 2 - - body row 2, column 3 - -The ``:widths:`` parameter lets you define relative column widths. The -default is equal column widths. If you have a three-column table and you -want the first column to be half as wide as the other two equal-width -columns, you can specify ``:widths: 1 2 2``. If you'd like the browser -to set the column widths automatically based on the column contents, you -can use ``:widths: auto``. File Names and Commands *********************** @@ -512,11 +584,55 @@ Internal Cross-Reference Linking ``:ref:`alternate text ``` (renders as :ref:`alternate text `). + Linking from a reST document to a markdown document is done using the reST + ``:doc:`` role, and using the path to the markdown file leaving off the + ``.md`` file extension. For example:: + + Refer to the :doc:`/GenAIExamples/supported_examples` list for details. + + Note that all the markdown files from all the repos are available with + this syntax because we copy all those files into the doc building folder + under a top-level directory with that repo's name. Markdown files in the + docs repo don't use the ``docs`` repo name as the path root but use ``/`` + instead. So to link to the contribution guide markdown file found in the + docs repo community directory you would use ``:doc:`Contribution Guide + ```. Notice you can change the link text using + the normal reST role syntax shown here. + .. group-tab:: markdown - TODO + Markdown supports linking to other documents using the ``[link text](link path)``. 
+ For example to link to a document within the same repo, a relative path is + used:: + + Refer to [Kubernetes deployment](./kubernetes/intel/README_gmc.md) + + That reference is rendered as a reference to the README_gmc.html found in + the directory ``kubernetes/intel`` relative to the document doing the + linking. + + References to documents in other repos within the OPEA project are made + using an URL to the document in the github.com repo as it would be found + in a web browser. For example, from a markdown document in the + GenAIExamples repo referencing a document in the GenAIInfra repo:: + + Refer to the [DocSum helm chart](https://github.com/opea-project/GenAIInfra/tree/main/helm-charts/docsum/README.md) + for instructions on deploying DocSum into Kubernetes on Xeon & Gaudi. + + That reference would be rendered into a reference to the + https://opea-project.github.io/GenAIInfra/helm-charts/docsum/README.html + document within the github.io website. + + Markdown supports linking to a reST document by using the Myst syntax that + mimics the way reST documents link to each other using the ``:ref:`` role + and using the label at the beginning of the reST document. For example:: + {ref}`ChatQnA Example Deployment Options ` + The ChatQnA example deployment options document found at + ``examples/ChatQnA/deploy/index.rst`` has that + ``chatqna-example-deployment`` label at the top we can + reference instead of knowing the path to the document. Non-ASCII Characters ******************** @@ -575,11 +691,11 @@ markdown content by using an ``include`` directive. ``:parser: myst_parser.sphinx_`` if the included file is markdown. \:start-after\: text - Only the content after the first occurance of the specified ``text`` in + Only the content after the first occurrence of the specified ``text`` in the external file will be included. 
\:end-before\: - Only the content before the first occurance of the specified ``text`` + Only the content before the first occurrence of the specified ``text`` in the external file will be included. These and other options described in the `docutils include directive `_ @@ -824,7 +940,7 @@ Drawings text description language to render drawings. For more information, see :ref:`graphviz-examples`. - We'v ealso included an extension providing ``mermaid`` support that also enables + We've also included an extension providing ``mermaid`` support that also enables that text description language to render drawings using:: .. mermaid:: @@ -877,7 +993,7 @@ Drawings Alternative Tabbed Content ************************** -In ResST, instead of creating multiple documents with common material except for some +In reST, instead of creating multiple documents with common material except for some specific sections, you can write one document and provide alternative content to the reader via a tabbed interface. When the reader clicks a tab, the content for that tab is displayed. For example:: diff --git a/developer-guides/docbuild.rst b/developer-guides/docbuild.rst index 2bb456e5..dffdf0fe 100644 --- a/developer-guides/docbuild.rst +++ b/developer-guides/docbuild.rst @@ -15,33 +15,60 @@ Documentation Overview ********************** OPEA project content is written using combination of markdown (``.md``) and -reStructuredText (``.rst``) markup languages (with Sphinx extensions), and -processed using Sphinx to create a formatted stand-alone website. Developers can -view this content either in its raw form as .rst markup files, or you can -generate the HTML content and view it with a web browser directly on your -workstation. The best reading experience is by viewing the generated HTML at +reStructuredText (``.rst``) markup languages (with Sphinx extensions such as +`Myst `_), and +processed using Sphinx to create a formatted stand-alone website. 
+The best reading experience is by viewing the generated HTML at https://opea-project.github.io. +While working on new content or editing existing content, developers can +generate the HTML content locally and view it with a web browser. You can read details about `markdown`_, `reStructuredText`_, and `Sphinx`_ from their respective websites. -The project's documentation contains the following items: - -* ReStructuredText and markdown source files used to generate documentation found at the - https://opea-project.github.io website. All of the documentation sources - are found in the ``github.com/opea-project`` repos, rooted in the ``docs`` repo. - There's also documentation in the repos where the project's code is - maintained: ``GenAIComps``, ``GenAIEval``, ``GenAIExamples``, and ``GenAIInfra``. +The project's documentation is a collection of ReStructuredText and markdown +source files used to generate documentation found at the +https://opea-project.github.io website. All of the documentation sources are +found in the ``github.com/opea-project`` project repos, organized and rooted in +the ``docs`` repo. Much of the detailed documentation lives in the repos where +the project's code is maintained: ``GenAIComps``, ``GenAIEval``, +``GenAIExamples``, and ``GenAIInfra``. The documentation generation process +collects all the needed files from all these repos into one building area to +create the final generated HTML. .. graphviz:: images/doc-gen-flow.dot :align: center :caption: Documentation Generation Flow +Some content is manipulated or generated during the doc build process: + +- Because markdown doesn't directly support cross-repo document linking, we use + full URLs to link to the markdown files in other project repos, for example a + link to a document in the GenAIInfra repo from a document in the GenAIExamples + repo would look like this: + + ``` + See [GMC Install](https://github.com/opea-project/GenAIInfra/tree/main/microservices-connector/README.md). 
+ ``` + + That link works when reading the markdown file in the github.com repo, but + should be changed to reference the generated HTML file in the github.io + rendering. + +- When new examples appear in the GenAIExamples repo, the indexing page that + lists all the examples is updated automatically by generating it at doc build + time by scanning the directory structure. The list of microservices is also + self-updated when new microservices are added to the GenAIComps/comps + directory. + +- References in markdown files to markdown files (.md file extension) are + converted to the corresponding generated HTML files by Sphinx using the Myst and + sphinx-md extensions. Set Up the Documentation Working Folders **************************************** -You'll need ``git`` installed to get the working folders set up: +To begin, you'll need ``git`` installed to get the working folders set up: * For an Ubuntu development system use: @@ -50,7 +77,7 @@ You'll need ``git`` installed to get the working folders set up: sudo apt install git Here's the recommended folder setup for documentation contributions and -generation, a parent folder called ``opea-project`` holds six locally +generation, a parent folder called ``opea-project`` holds five locally cloned repos from the opea-project. You can use a different name for the parent folder but the doc build process assumes the repo names are as shown here: @@ -62,11 +89,9 @@ folder but the doc build process assumes the repo names are as shown here: ├── GenAIEval ├── GenAIExamples ├── GenAIInfra - ├── opea-project.github.io The parent ``opea-project`` folder is there to organize the cloned repos -from the project. If you have repo publishing rights, we'll also be cloning the -publishing repo opea-project.github.io later in these steps. +from the project. 
In the following steps, you'll create a fork of all the upstream OPEA project repos needed to build the documentation to your personal GitHub account, clone @@ -81,8 +106,8 @@ structure: :align: center :class: drop-shadow -#. At a command prompt, create a working folder on your development computer and - clone your personal ``docs`` repository: +#. At a command prompt, create the top working folder on your development + computer and clone your personal ``docs`` repository there: .. code-block:: bash @@ -97,21 +122,23 @@ structure: cd docs git remote add upstream https://github.com/opea-project/docs.git - After that, you'll have ``origin`` pointing to your cloned personal repo and - ``upstream`` pointing to the project repo. + After that, you'll have ``origin`` pointing to your cloned personal ``docs`` + repo and ``upstream`` pointing to the project ``docs`` repo. + +#. Return to the parent directory with ``cd ..`` -#. Do the same steps (fork to your personal account, clone to your local - computer, and setup the git upstream remote) for the other repos containing - project documentation, replacing the docs.git repo name in the previous step +#. Now do the same steps (fork to your personal account, clone to your local + computer, and setup the git upstream remote) for the other four repos + replacing the docs.git repo name in the previous step with the appropriate repo name in this list: - * GenAIComps - * GenAIEval - * GenAIExamples - * GenAIInfra + * ``GenAIComps`` + * ``GenAIEval`` + * ``GenAIExamples`` + * ``GenAIInfra`` -#. If you haven't done so already, be sure to configure git with your name +#. If you haven't done so already, configure git with your name and email address for the ``Signed-off-by`` line in your commit messages: .. 
code-block:: bash @@ -122,7 +149,8 @@ structure: Install the Documentation Tools ******************************* -Our documentation processing has been tested to run with Python 3.8.10 and +Our documentation processing has been tested to run on Ubuntu (both natively and +in Windows Subsystem for Linux (WSL)) with Python 3.8.10 and later, and these other tools: * sphinx version: 7.3.0 @@ -130,13 +158,17 @@ later, and these other tools: * sphinx-rtd-theme version: 2.0.0 * sphinx-tabs version: 3.4.5 * myst-parser version: 3.0.1 +* sphinx-md version: 0.0.3 * sphinxcontrib-mermaid version: 0.9.2 * pymarkdownlnt version: 0.9.21 -Depending on your Linux version, install the needed tools. You should consider -using the `Python virtual environment`_ -tools to maintain your Python environment from being changed by other work on -your computer. +Depending on your Linux version, install the needed tools. + +.. important:: + + You should consider using the `Python virtual environment`_ tools + to maintain your Python environment from being changed by other work on your + computer. .. _Python virtual environment: https://docs.python.org/3/library/venv.html @@ -203,8 +235,8 @@ Sphinx supports easy customization of the generated HTML documentation appearance through the use of themes. The ``sphinx-rtd-theme`` (Read The Docs) theme is installed as part of the ``requirements.txt`` list above. Tweaks to the standard ``read-the-docs`` appearance are added by using CSS and JavaScript -customization found in ``doc/_static``, and theme template overrides found in -``doc/_templates``. If you change to another theme, you'll need to tweak +customization found in ``doc/sphinx/_static``, and theme template overrides found in +``doc/sphinx/_templates``. If you change to another theme, you'll need to tweak these customizations, not something for the faint of heart. 
The Sphinx build system creates document cache information that attempts to @@ -216,39 +248,53 @@ environment and a ``make html`` again generally fixes these issues. Run the Documentation Processors ******************************** -The ``docs`` folder (with all cloned sibling repos) have all the doc source files, +The ``docs`` folder (with all the cloned sibling repos) has all the doc source files, images, extra tools, and ``Makefile`` for generating a local copy of the OPEA technical documentation. It's best to start with a clean doc-build environment so use ``make clean`` to remove the ``_build`` working folder if it exists. The ``Makefile`` creates the ``_build`` folder (if it doesn't exist) and copies all needed files from these cloned repos into the ``_build/rst`` working folder. +Normally you'd have each repo checked out at the main branch before you run the +``make html`` step. The doc build process uses the five repos' contents to +create the HTML site. If you're working on changes to documentation in a repo +and have those changes on a branch other than main, you can still generate the +documentation with that branch's changes -- this is how you can verify your +changes will not generate errors when your branch with changes is merged with +the main branch. + .. code-block:: bash cd ~/opea-project/docs make clean make html -Depending on your development system, it will take less a minute to collect and +Depending on your development system, it will take about a minute to collect and generate the HTML content. When done, you can view the HTML output in ``~/opea-project/docs/_build/html/index.html``. As a convenience, there's a make target that will ``cd`` to the ``_build/html`` -folder and run a local Python web server: +folder and run a local Python web server on port 8000: .. code-block:: bash make server -and use your web browser to open the URL: ``http://localhost:8000``. 
When +Use your web browser to open the URL: ``http://localhost:8000`` and wander +around your local site and view the results of your changes. When done, press :kbd:`ctrl-C` in your command-prompt window to stop the web server. +If things look good, you'd proceed to using git (``git add .``) to add and commit +(``git commit -s``) your changes, push those changes to your personal forked +repo (``git push origin <branch-name>``) and submit a PR using the GitHub web +interface. + Publish Content *************** -If you have merge rights to the opea-project repo called -``opea-project.github.io``, you can update the public project documentation -found at https://opea-project.github.io. +If you have merge rights to the opea-project ``opea-project.github.io`` repo, +you can update the public project documentation found at +https://opea-project.github.io. You'll need to do a one-time clone of the upstream repo (we publish directly to the upstream repo rather than to a personal forked copy): @@ -269,10 +315,10 @@ This uses git commands to synchronize the new content with what's already published and will delete files in the publishing repo's **latest** folder that are no longer needed. New or changed files from the newly-generated HTML content are pushed to the GitHub pages -publishing repo. The public site at https://opea-project.github.io will -be automatically updated by the `GitHub pages system -`_, typically within a few -minutes. +publishing repo (``opea-project.github.io.git``). The public site at +https://opea-project.github.io will be automatically updated by the +`GitHub pages system <https://pages.github.com/>`_, +typically within a few minutes. Document Versioning ******************* @@ -290,8 +336,7 @@ specifically something like this: 'docs_title': docs_title, 'is_release': is_release, 'versions': ( ("latest", "/latest/"), - ("0.8", "/0.8/"), - ("0.7", "/0.7/"), + ("1.0", "/1.0/"), ) } @@ -304,12 +349,12 @@ version from an older one, without going to ``latest`` first. 
By default, documentation build and publishing both assume we're generating documentation for the main branch and publishing to the ``/latest/`` area on https://opea-project.github.io. When we're generating the documentation for a -tagged version (e.g., 0.8), check out that version of **all** the component +tagged version (e.g., 1.0), check out that version of **all** the component repos, and add some extra flags to the ``make`` commands: .. code-block:: bash - version=0.8 + version=1.0 for d in docs GenAIComps GenAIExamples GenAIEval GenAIInfra ; do cd ~/opea-project/$d git checkout $version @@ -355,7 +400,7 @@ a heading level problem on lines 5 and 111 in If you do a ``make html`` without first doing a ``make clean``, there may be files left behind from a previous build that can cause some unexpected messages -to be reported. +to be reported. If things look suspicious, do a ``make clean;make html`` again. If all messages were filtered away, the build process will report as successful, reporting: @@ -369,9 +414,16 @@ The output from the Sphinx build is processed by the Python script configuration files in the ``.known-issues`` folder. (This filtering is done as part of the ``Makefile``.) -You can modify the filtering by adding or editing a conf file in the +The filtering tool matches whole-line and multi-line patterns and removes +them. Anything left behind is considered a message that should be +reported. You can modify the filtering by adding or editing a conf file in the ``.known-issues`` folder, following the examples found there. +Multi-line patterns can get rather complex. We're not using any multi-line patterns in +the OPEA project. You can see complex examples in other open source projects +using this filtering script, such as pattern files in +`Project ACRN .known-issues <https://github.com/projectacrn/acrn-hypervisor/tree/master/doc/.known-issues>`_. + .. _reStructuredText: https://sphinx-doc.org/rest.html .. 
_markdown: https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax .. _Sphinx: https://sphinx-doc.org/ diff --git a/developer-guides/images/doc-gen-flow.dot b/developer-guides/images/doc-gen-flow.dot index 4e7fb3aa..b52ff843 100644 --- a/developer-guides/images/doc-gen-flow.dot +++ b/developer-guides/images/doc-gen-flow.dot @@ -10,10 +10,16 @@ digraph docgen { conf [shape="rectangle" label="conf.py\nconfiguration"] rtd [shape="rectangle" label="read-the-docs\ntheme"] html [shape="rectangle" label="HTML\nweb site"] - sphinx[shape="ellipse" label="sphinx +\ndocutils"] + includes [shape="rectangle" label="includes"] + sphinx[shape="ellipse" label="sphinx +\nMyst Parser +\ndocutils"] + prepare[shape="ellipse" label="fix\ncross-repo\nlinks"] + scan[shape="ellipse" label="scan\nrepo dirs"] images -> sphinx + scan -> includes + includes -> sphinx rst -> sphinx - md -> sphinx + md -> prepare + prepare -> sphinx conf -> sphinx rtd -> sphinx sphinx -> html diff --git a/developer-guides/mdtable.txt b/developer-guides/mdtable.txt new file mode 100644 index 00000000..e9d91efc --- /dev/null +++ b/developer-guides/mdtable.txt @@ -0,0 +1,4 @@ +| heading 1 | heading 2 | heading 3 | +|---|---|---| +|row 1 column 1 | row 1 column 2 | yes, it's row 1 column 3| +|row 2 col 1 | row 2 column 2 | row 2 col 3 | diff --git a/examples/ChatQnA/ChatQnA_Guide.rst b/examples/ChatQnA/ChatQnA_Guide.rst index 475c8ba5..f7ba23e1 100644 --- a/examples/ChatQnA/ChatQnA_Guide.rst +++ b/examples/ChatQnA/ChatQnA_Guide.rst @@ -111,7 +111,7 @@ The architecture follows a series of steps to process user queries and generate chatbot's answer. Expected Output -+============== +=============== TBD @@ -206,30 +206,10 @@ The gateway serves as the interface for users to access. 
The gateway routes inco Deployment ********** -From the below deployment options, choose the one that best fits your requirements: +See the :ref:`chatqna-example-deployment` that includes both single-node and +orchestrated multi-node configurations, and choose the one that best fits your +requirements. -Single Node -=========== - -.. toctree:: - :maxdepth: 1 - - Xeon Scalable Processor - Gaudi AI Accelerator - Nvidia GPU - AI PC - -Kubernetes -========== - -* Xeon & Gaudi with GMC -* Xeon & Gaudi without GMC -* Using Helm Charts - -Cloud Native -============ - -* Red Hat OpenShift Container Platform (RHOCP) Troubleshooting *************** @@ -396,4 +376,4 @@ Log in to Grafana using the default credentials: Summary and Next Steps ======================= -TBD \ No newline at end of file +TBD diff --git a/examples/ChatQnA/deploy/index.rst b/examples/ChatQnA/deploy/index.rst new file mode 100644 index 00000000..0c3c3e55 --- /dev/null +++ b/examples/ChatQnA/deploy/index.rst @@ -0,0 +1,29 @@ +.. _chatqna-example-deployment: + +ChatQnA Example Deployment Options +################################## + +Here are some deployment options, depending on your hardware and environment: + +Single Node +*********** + +.. toctree:: + :maxdepth: 1 + + Xeon Scalable Processor + Gaudi AI Accelerator + Nvidia GPU + AI PC + +Kubernetes +********** + +* Xeon & Gaudi with GMC +* Xeon & Gaudi without GMC +* Using Helm Charts + +Cloud Native +************ + +* Red Hat OpenShift Container Platform (RHOCP) diff --git a/examples/ChatQnA/deploy/xeon.txt b/examples/ChatQnA/deploy/xeon.txt deleted file mode 100644 index 54bc54ed..00000000 --- a/examples/ChatQnA/deploy/xeon.txt +++ /dev/null @@ -1,75 +0,0 @@ -.. _ChatQnA_deploy_xeon: - - -Single Node On-Prem Deployment: XEON Scalable Processors -######################################################## - -e.g use case: -Should provide context for selecting between vLLM and TGI. - -.. tabs:: - - .. 
tab:: Deploy with Docker compose with vLLM - - TODO: The section must cover how the above said archi can be implemented - with vllm mode, or the serving model chosen. Show an Basic E2E end case - set up with 1 type of DB for e.g Redis based on what is already covered in - chatqna example( others can be called out or referenced to accordingly), - Show how to use one SOTA model, for llama3 and others with a sample - configuration. The use outcome must demonstrate on a real use case showing - both productivity and performance. For consistency, lets use the OPEA - documentation for RAG use cases - - Sample titles: - - 1. Overview - Talk a few lines of what is expected in this tutorial. Forer.g. Redis - db used and llama3 model run to showcase an e2e use case using OPEA and - vllm. - #. Pre-requisites - Includes cloning the repos, pulling the necessary containers if - available (UI, pipeline ect), setting the env variables like proxys, - getting access to model weights, get tokens on hf, lg etc. sanity - checks if needed. Etc. - #. Prepare (Building / Pulling) Docker images - a) This step will involve building/pulling ( maybe in future) relevant docker images with step-by-step process along with sanity check in the end - #) If customization is needed, we show 1 case of how to do it - - #. Use case setup - - This section will include how to get the data and other - dependencies needed, followed by all the micoservice envs ready. Use - this section to also talk about how to set other models if needed, how - to use other dbs etc - - #. Deploy chatqna use case based on the docker_compose - - This should cover the steps involved in starting the microservices - and megaservies, also explaining some key highlights of what’s covered - in the docker compose. Include sanity checks as needed. Each - microservice/megaservice start command along with what it does and the - expected output will be good to add - - #. Interacting with ChatQnA deployment. 
( or navigating chatqna workflow) - - This section to cover how to use a different machine to interact and - validate the microservice and walk through how to navigate each - services. For e.g uploading local document for data prep and how to get - answers? Customer will be interested in getting the output for a query, - and a time also measure the quality of the model and the perf metrics( - Health and Statistics to also be covered). Please check if these - details can also be curled in the endpoints. Is uploading templates - available now?. Custom template is available today - - Show all the customization available and features - - #. Additional Capabilities (optional) - Use case specific features to call out - - #. Launch the UI service - Show steps how to launch the UI and a sample screenshot of query and output - - - .. tab:: Deploy with docker compose with TGI - - This section will be similar to vLLM. Should be worth trying to single source. diff --git a/examples/index.rst b/examples/index.rst index 4428f522..18ce1704 100644 --- a/examples/index.rst +++ b/examples/index.rst @@ -9,6 +9,7 @@ GenAIExamples are designed to give developers an easy entry into generative AI, :maxdepth: 1 ChatQnA/ChatQnA_Guide + ChatQnA/deploy/index ---- diff --git a/getting-started/README.md b/getting-started/README.md index 873a5444..536d6767 100644 --- a/getting-started/README.md +++ b/getting-started/README.md @@ -4,17 +4,19 @@ To get started with OPEA you need the right hardware and basic software setup. -Hardware Requirements: For the hardware configuration, If you need Hardware Access visit the Intel Tiber Developer Cloud to select from options such as Xeon or Gaudi processors that meet the necessary specifications. -Software Requirements: Please refer to the Support Matrix[ Hyper link needed] to ensure you have the required software components in place. 
+- Hardware Requirements: For the hardware configuration, if you need hardware access, visit the Intel Tiber Developer Cloud to select from options such as Xeon or Gaudi processors that meet the necessary specifications. + +- Software Requirements: Refer to the [Support Matrix](https://github.com/opea-project/GenAIExamples/blob/main/README.md#getting-started) to ensure you have the required software components in place. ## Understanding OPEA's Core Components Before moving forward, it's important to familiarize yourself with two key elements of OPEA: GenAIComps and GenAIExamples. -1. GenAIComps: GenAIComps is a collection of microservice components that form a service-based toolkit. This includes a variety of services such as llm (language learning models), embedding, and reranking, among others. -2. GenAIExamples: While GenAIComps offers a range of microservices, GenAIExamples provides practical, deployable solutions to help users implement these services effectively. Examples include ChatQnA and DocSum, which leverage the microservices for specific applications. + +- GenAIComps is a collection of microservice components that form a service-based toolkit. This includes a variety of services such as llm (large language models), embedding, and reranking, among others. +- While GenAIComps offers a range of microservices, GenAIExamples provides practical, deployable solutions to help users implement these services effectively. Examples include ChatQnA and DocSum, which leverage the microservices for specific applications. ## Visual Guide to Deployment -To illustrate, here's a simplified visual guide on deploying a ChatQnA GenAIExample, showcasing how you can set up this solution in just a few steps. +To illustrate, here's a simplified visual guide on deploying a ChatQnA GenAIExample, showcasing how you can set up this solution in just a few steps. 
![Getting started with OPEA](assets/getting_started.gif) @@ -52,8 +54,8 @@ source ./docker_compose/intel/hpu/gaudi/set_env.sh source ./docker_compose/nvidia/gpu/set_env.sh ``` -### Deploy ChatQnA Megaservice and Microservices -Select the compose.yaml file that matches your hardware. +## Deploy ChatQnA Megaservice and Microservices +Select the directory containing the `compose.yaml` file that matches your hardware. ``` #xeon cd docker_compose/intel/cpu/xeon/ @@ -66,18 +68,21 @@ Now we can start the services ``` docker compose up -d ``` -It will automatically download the docker image on docker hub: +It will automatically download the needed docker images from docker hub: + - docker pull opea/chatqna:latest - docker pull opea/chatqna-ui:latest -In following cases, you will need to build docker image from source by yourself. +In the following cases, you will need to build the docker image from source by yourself. -1. Failed to download the docker image. -2. Use the latest or special version. +- The docker image failed to download. (You may want to first check the + [Docker Images](https://github.com/opea-project/GenAIExamples/blob/main/docker_images_list.md) + list and verify that the docker image you're downloading exists on dockerhub.) +- You want to use a different version than latest. -Please refer to the ['Build Docker Images'](/examples/ChatQnA/deploy) section from the file that matches your hardware. +Refer to the {ref}`ChatQnA Example Deployment Options <chatqna-example-deployment>` section for building from source instructions matching your hardware. -### Interact with ChatQnA Megaservice and Microservice +## Interact with ChatQnA Megaservice and Microservice ``` curl http://${host_ip}:8888/v1/chatqna \ -H "Content-Type: application/json" \ @@ -85,14 +90,10 @@ curl http://${host_ip}:8888/v1/chatqna \ "messages": "What is the revenue of Nike in 2023?" }' ``` -This command will provide the response as a stream of text. 
You can modify the message parameter in the curl command and interact with the ChatQnA service. - -### What’s Next: +This command will provide the response as a stream of text. You can modify the `message` parameter in the `curl` command and interact with the ChatQnA service. -1. Try [GenAIExamples](/examples/index.rst) in-detail starting with [ChatQnA](/examples/ChatQnA/ChatQnA_Guide.rst) example. - -2. Try [GenAIComps](/microservices/index.rst) to build microservices. - -3. Interested in contributing to OPEA? Refer to [OPEA Community](/community/index.rst) and [Contribution Guides](/community/index.rst#contributing-guides). - +## What’s Next +- Try [GenAIExamples](/examples/index.rst) in-detail starting with [ChatQnA](/examples/ChatQnA/ChatQnA_Guide.rst) example. +- Try [GenAIComps](/microservices/index.rst) to build microservices. +- Interested in contributing to OPEA? Refer to [OPEA Community](/community/index.rst) and [Contribution Guides](/community/index.rst#contributing-guides). diff --git a/scripts/test/test.md b/scripts/test/test.md deleted file mode 100644 index 386fca09..00000000 --- a/scripts/test/test.md +++ /dev/null @@ -1,35 +0,0 @@ -# Test markdown file with cross-repo links - -This folder contains a collection of Kubernetes manifest files for deploying the ChatQnA service across scalable nodes. It includes a comprehensive [benchmarking tool](/GenAIEval/evals/benchmark/README.md) that enables throughput analysis to assess inference performance. - -We have created the [BKC manifest](https://github.com/opea-project/GenAIExamples/tree/main/ChatQnA/benchmark) for single node, two nodes and four nodes K8s cluster. In order to apply, we need to check out and configure some values. - -The test uses the [benchmark tool](https://github.com/opea-project/GenAIEval/tree/main/evals/benchmark) to do performance test. We need to set up benchmark tool at the master node of Kubernetes which is k8s-master. 
- -This document outlines the deployment process for a CodeGen application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on Intel Gaudi2 server. The steps include Docker images creation, container deployment via Docker Compose, and service execution to integrate microservices such as `llm`. We will publish the Docker images to the Docker Hub soon, further simplifying the deployment process for this service. - -Install GMC in your Kubernetes cluster, if you have not already done so, by following the steps in Section "Getting Started" at [GMC Install](https://github.com/opea-project/GenAIInfra/tree/main/microservices-connector#readme). We will soon publish images to Docker Hub, at which point no builds will be required, further simplifying install. - -If you get errors like "Access Denied", [validate micro service](https://github.com/opea-project/GenAIExamples/tree/main/CodeGen/docker_compose/intel/cpu/xeon#validate-microservices) first. - -Update Knowledge Base via Local File [nke-10k-2023.pdf](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/redis/data/nke-10k-2023.pdf) - -Please refer to [Xeon README](/GenAIExamples/AudioQnA/docker_compose/intel/cpu/xeon/README.md) or [Gaudi README](/GenAIExamples/AudioQnA/docker_compose/intel/hpu/gaudi/README.md) to build the OPEA images. These too will be available on Docker Hub soon to simplify use. - -Here's a [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/tei/Dockerfile) to a Docker file. - -You can take a look at the tools yaml and python files in this example. For more details, please refer to the "Provide your own tools" section in the instructions [here](https://github.com/opea-project/GenAIComps/tree/main/comps/agent/langchain#5-customize-agent-strategy). - -Here's another [Link](https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/ui/docker/Dockerfile.react) to examine. 
- -Here is a nice one [Docker Xeon README](/GenAIExamples/DocSum/docker_compose/intel/cpu/xeon/README.md) and that with a section reference [Docker Xeon README](/GenAIExamples/DocSum/docker_compose/intel/cpu/xeon/README.md#section) - -And a reference to a python file [finetune_config](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/finetune_config.py) to keep things interesting. - -Here's an [issue](https://github.com/opea-project/GenAIExamples/issues/763) -reference and -[Actions](https://github.com/opea-project/GenAIExamples/actions) reference too. -Might as well test [PRs](https://github.com/opea-project/GenAIExamples/pulls) -and [Projects](https://github.com/opea-project/GenAIExamples/projects) too. - -In release notes will find [88b3c1](https://github.com/opea-project/GenAIInfra/commit/88b3c108e5b5e3bfb6d9346ce2863b69f70cc2f1) commit references. diff --git a/scripts/test/test.md.saved b/scripts/test/test.md.saved deleted file mode 100644 index 9d0c8fac..00000000 --- a/scripts/test/test.md.saved +++ /dev/null @@ -1,35 +0,0 @@ -# Test markdown file with cross-repo links - -This folder contains a collection of Kubernetes manifest files for deploying the ChatQnA service across scalable nodes. It includes a comprehensive [benchmarking tool](https://github.com/opea-project/GenAIEval/blob/main/evals/benchmark/README.md) that enables throughput analysis to assess inference performance. - -We have created the [BKC manifest](https://github.com/opea-project/GenAIExamples/tree/main/ChatQnA/benchmark) for single node, two nodes and four nodes K8s cluster. In order to apply, we need to check out and configure some values. - -The test uses the [benchmark tool](https://github.com/opea-project/GenAIEval/tree/main/evals/benchmark) to do performance test. We need to set up benchmark tool at the master node of Kubernetes which is k8s-master. 
- -This document outlines the deployment process for a CodeGen application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on Intel Gaudi2 server. The steps include Docker images creation, container deployment via Docker Compose, and service execution to integrate microservices such as `llm`. We will publish the Docker images to the Docker Hub soon, further simplifying the deployment process for this service. - -Install GMC in your Kubernetes cluster, if you have not already done so, by following the steps in Section "Getting Started" at [GMC Install](https://github.com/opea-project/GenAIInfra/tree/main/microservices-connector#readme). We will soon publish images to Docker Hub, at which point no builds will be required, further simplifying install. - -If you get errors like "Access Denied", [validate micro service](https://github.com/opea-project/GenAIExamples/tree/main/CodeGen/docker_compose/intel/cpu/xeon#validate-microservices) first. - -Update Knowledge Base via Local File [nke-10k-2023.pdf](https://github.com/opea-project/GenAIComps/blob/main/comps/retrievers/redis/data/nke-10k-2023.pdf) - -Please refer to [Xeon README](https://github.com/opea-project/GenAIExamples/blob/main/AudioQnA/docker_compose/intel/cpu/xeon/README.md) or [Gaudi README](https://github.com/opea-project/GenAIExamples/blob/main/AudioQnA/docker_compose/intel/hpu/gaudi/README.md) to build the OPEA images. These too will be available on Docker Hub soon to simplify use. - -Here's a [Link](https://github.com/opea-project/GenAIComps/blob/main/comps/reranks/tei/Dockerfile) to a Docker file. - -You can take a look at the tools yaml and python files in this example. For more details, please refer to the "Provide your own tools" section in the instructions [here](https://github.com/opea-project/GenAIComps/tree/main/comps/agent/langchain#5-customize-agent-strategy). 
- -Here's another [Link](https://github.com/opea-project/GenAIExamples/blob/main/ChatQnA/ui/docker/Dockerfile.react) to examine. - -Here is a nice one [Docker Xeon README](https://github.com/opea-project/GenAIExamples/blob/main/DocSum/docker_compose/intel/cpu/xeon/README.md) and that with a section reference [Docker Xeon README](https://github.com/opea-project/GenAIExamples/blob/main/DocSum/docker_compose/intel/cpu/xeon/README.md#section) - -And a reference to a python file [finetune_config](https://github.com/opea-project/GenAIComps/blob/main/comps/finetuning/finetune_config.py) to keep things interesting. - -Here's an [issue](https://github.com/opea-project/GenAIExamples/issues/763) -reference and -[Actions](https://github.com/opea-project/GenAIExamples/actions) reference too. -Might as well test [PRs](https://github.com/opea-project/GenAIExamples/pulls) -and [Projects](https://github.com/opea-project/GenAIExamples/projects) too. - -In release notes will find [88b3c1](https://github.com/opea-project/GenAIInfra/commit/88b3c108e5b5e3bfb6d9346ce2863b69f70cc2f1) commit references.