From a532fa56cd8054824149a151cf2afb76f62ad465 Mon Sep 17 00:00:00 2001
From: <>
Date: Sat, 21 Sep 2024 17:39:50 +0000
Subject: [PATCH] Deployed e07f550 with MkDocs version: 1.6.1

CFGGuide
+
+
+
+ Bases: Guide
Guide to generate text that is in the language of a context-free Lark grammar.
+ +outlines/fsm/guide.py
__init__(cfg_string, tokenizer)
+
+Construct the PartialLark parser and set the empty initial_state (PartialParserState)
+ +outlines/fsm/guide.py
can_terminate_state(state)
+
Whether generation is allowed to terminate in the current state.
+ +outlines/fsm/guide.py
copy()
+
+get_next_instruction(state)
+
+Return the next instruction for guided generation.
Current lazy approach:
- For each token in the vocabulary:
  - create a copy of the parser's state
  - add the token to the parser's input text
  - if valid, add the token to the returned tokens
+Further refinements are necessary for performant text processing.
state
    The guide's current PartialParserState, or None if complete.

A Generate instance that contains the allowed token ids.
outlines/fsm/guide.py
get_next_state(state, token_id)
+
+Update the state of the guide. +Decode the token_id, and calculate the new parser_state with the token applied.
state
    The guide's current PartialParserState, or None if complete.
token_id
    The id of the token that was just generated.

The guide's new PartialParserState.
+ +outlines/fsm/guide.py
iter_valid_token_ids(state, candidate_token_ids)
+
+Iterate over the given token_ids and yield those that are valid for the current parser state.
+parser_state + The current state of the parser, or None if complete. +token_ids + The list of token ids to check for validity.
+int + Valid token ids.
+ +outlines/fsm/guide.py
must_terminate_state(state)
+
Whether generation must terminate because there are no legal continuations.
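The sketch below shows how these methods fit together in a decoding loop. It is a minimal illustration rather than the library's generation pipeline: it assumes the tokenizer is wrapped in an Outlines TransformerTokenizer, that the guide exposes the initial_state attribute created in __init__, that the toy Lark grammar is acceptable, and it takes the first allowed token instead of sampling from a model's logits.

```python
from transformers import AutoTokenizer

from outlines.fsm.guide import CFGGuide
from outlines.models.transformers import TransformerTokenizer

# A toy Lark grammar: the output must be one or more digits.
digits_grammar = """
start: NUMBER
NUMBER: /[0-9]+/
"""

tokenizer = TransformerTokenizer(AutoTokenizer.from_pretrained("gpt2"))
guide = CFGGuide(digits_grammar, tokenizer)

state = guide.initial_state  # assumption: the empty PartialParserState set up in __init__
generated = []
while not guide.must_terminate_state(state):
    instruction = guide.get_next_instruction(state)
    # instruction.tokens lists the token ids that keep the text inside the grammar.
    token_id = instruction.tokens[0]  # a real decoder would sample from the model's logits
    generated.append(token_id)
    state = guide.get_next_state(state, token_id)
    if guide.can_terminate_state(state):
        break
```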
Generate
+
+
+
+ dataclass
+
+
+Generate instruction
+tokens
+ The tokens that lead to a valid completion if generated. A value
+ of None
indicates that all tokens are allowed.
outlines/fsm/guide.py
Guide
+
+
+
+ Bases: Protocol
Base definition of a generation guide.
A generation guide defines the behavior of a finite-state machine that guides
a text generation procedure. Unlike the DFAs built from regular expressions,
guides can also emit a Write instruction, which tells the model that it can
append a sequence of tokens (or a whole word) instead of generating it
token by token.
outlines/fsm/guide.py
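The instructions a guide emits can be consumed in a small driver loop. Here is a minimal sketch of that contract; the advance helper and the sample_from_logits callback are hypothetical stand-ins for whatever model integration is used.

```python
from outlines.fsm.guide import Generate, Write


def advance(guide, state, sample_from_logits):
    """One decoding step driven by a Guide: ask for an instruction,
    pick or copy tokens accordingly, then move to the next state."""
    instruction = guide.get_next_instruction(state)
    if isinstance(instruction, Write):
        # The guide already knows the exact continuation: append it verbatim.
        token_ids = list(instruction.tokens)
    else:  # Generate
        # Restrict sampling to the allowed token ids (None means unrestricted).
        token_ids = [sample_from_logits(allowed=instruction.tokens)]
    for token_id in token_ids:
        state = guide.get_next_state(state, token_id)
    return state, token_ids
```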
RegexGuide
+
+
+
+ Bases: Guide
Guide to generate text in the language of a regular expression.
+ +outlines/fsm/guide.py
get_next_instruction(state)
+
+Return the next instruction for guided generation.
+The initialization of the guide builds an index which maps FSM states to a +map from authorized tokens to the state in which the guide needs to move +if said token is generated. Therefore the authorized tokens at the +current state are the keys of the map returned by the value of the index +for current state.
If the current state is not contained in the index, this means that we are
in a final state of the guide. We only authorize EOS tokens in the final
state.
+state + The current state of the guide.
A Generate instance that contains the allowed token ids.
outlines/fsm/guide.py
get_next_state(state, token_id)
+
+Update the state of the guide.
+We use the index to determine to which state the guide should transition +given the token that was just generated.
+state + The current state of the guide. +token_id + The id of the token that was just generated.
+The new state of the guide.
+ +outlines/fsm/guide.py
is_final_state(state)
+
+StopAtEOSGuide
+
+
+
+ Bases: Guide
Guide to generate tokens until the EOS token has been generated.
+ +outlines/fsm/guide.py
__init__(tokenizer)
+
+Initialize the generation guide.
tokenizer
    The model's tokenizer.
+ +outlines/fsm/guide.py
Write
+
+
+
+ dataclass
+
+
+Write instruction.
+tokens + The sequence of tokens to be added to the current sequence by the + generation process.
+ +outlines/fsm/guide.py
create_states_mapping(regex_string, tokenizer, regex_parser=interegular.parse_pattern, frozen_tokens=[])
+
Create the variables related to the mapping between states and tokens.
The parameters of the function are used for caching purposes.

Parameters
regex_string: (str):
    The regular expression string to generate a states mapping for.
tokenizer: (Tokenizer):
    The model's tokenizer.
regex_parser: (Callable[[str], interegular.Pattern], optional):
    A function that parses a regex string into an interegular Pattern object.
frozen_tokens: (List[str], optional):
    A list of tokens that should be kept as-is when expanding the token-level FSM
    into a byte-level FSM. Defaults to an empty list.

Returns
states_to_token_maps: (Dict[int, Dict[int, int]]):
    A mapping from states to a mapping from token ids originating from that state
    to the next state to transition to given that token. The structure is as follows:
    (origin_state -> (token_id -> next_state))
empty_token_ids: (Set[int]):
    A set of token ids that correspond to empty strings.
final_states: (set):
    A set of final states in the FSM.
outlines/fsm/guide.py
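A sketch of calling this function directly; the gpt2 tokenizer is only an illustrative choice, and wrapping it in TransformerTokenizer is an assumption about the expected Tokenizer interface.

```python
from transformers import AutoTokenizer

from outlines.fsm.guide import create_states_mapping
from outlines.models.transformers import TransformerTokenizer

tokenizer = TransformerTokenizer(AutoTokenizer.from_pretrained("gpt2"))

# Map FSM states to {token_id: next_state} transitions for a 4-digit PIN pattern.
states_to_token_maps, empty_token_ids, final_states = create_states_mapping(
    r"[0-9]{4}", tokenizer
)
print(len(states_to_token_maps), final_states)
```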
build_regex_from_schema(schema, whitespace_pattern=None)
+
+Turn a JSON schema into a regex that matches any JSON object that follows + this schema.
+JSON Schema is a declarative language that allows to annotate JSON documents + with types and descriptions. These schemas can be generated from any Python + datastructure that has type annotation: namedtuples, dataclasses, Pydantic + models. And by ensuring that the generation respects the schema we ensure + that the output can be parsed into these objects. + This function parses the provided schema and builds a generation schedule which + mixes deterministic generation (fixed strings), and sampling with constraints.
+Parameters
+schema
+ A string that represents a JSON Schema.
+ whitespace_pattern
+ Pattern to use for JSON syntactic whitespace (doesn't impact string literals)
    Example: allow only a single space or newline with whitespace_pattern=r"[\n ]?"
Returns
A generation schedule. A list of strings that represent the JSON
schema's structure and regular expressions that define the structure of
the fields.
+References
+.. [0] JSON Schema. https://json-schema.org/
+ +outlines/fsm/json_schema.py
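For illustration, a sketch that derives the schema string from a Pydantic model (pydantic v2) and checks a hand-written JSON object against the produced regex; the Character model and the sample JSON are made up.

```python
import json
import re

from pydantic import BaseModel

from outlines.fsm.json_schema import build_regex_from_schema


class Character(BaseModel):
    name: str
    age: int


schema_str = json.dumps(Character.model_json_schema())
regex_str = build_regex_from_schema(schema_str)

# A JSON object that follows the schema should match the generated regex.
print(bool(re.fullmatch(regex_str, '{"name": "Aya", "age": 31}')))
```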
convert_json_schema_to_str(json_schema)
+
+Convert a JSON schema to a string.
+json_schema + The JSON schema.
+str + The JSON schema converted to a string.
+ValueError + If the schema is not a dictionary, a string or a Pydantic class.
+ +outlines/fsm/json_schema.py
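A short sketch of the three accepted input types; the Point model is illustrative.

```python
from pydantic import BaseModel

from outlines.fsm.json_schema import convert_json_schema_to_str


class Point(BaseModel):
    x: float
    y: float


as_str = convert_json_schema_to_str(Point)                    # a Pydantic class
as_str = convert_json_schema_to_str({"type": "integer"})      # a dictionary
as_str = convert_json_schema_to_str('{"type": "integer"}')    # already a string
```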
get_schema_from_signature(fn)
+
+Turn a function signature into a JSON schema.
+Every JSON object valid to the output JSON Schema can be passed
+to fn
using the ** unpacking syntax.
outlines/fsm/json_schema.py
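A sketch of turning an annotated function into a schema; the book_flight function is made up.

```python
from outlines.fsm.json_schema import get_schema_from_signature


def book_flight(destination: str, passengers: int, round_trip: bool = True):
    """Book a flight."""
    ...


schema = get_schema_from_signature(book_flight)
# The schema describes an object whose properties mirror the function's
# parameters, so any JSON object it validates can be splatted back into
# book_flight(**arguments).
```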
to_regex(resolver, instance, whitespace_pattern=None)
+
+Translate a JSON Schema instance into a regex that validates the schema.
+Note
Many features of JSON schema are missing:
- Handle additionalProperties keyword
- Handle types defined as a list
- Handle constraints on numbers
- Handle special patterns: date, uri, etc.

This does not support recursive definitions.
+Parameters
+resolver
+ An object that resolves references to other instances within a schema
+ instance
+ The instance to translate
+ whitespace_pattern
+ Pattern to use for JSON syntactic whitespace (doesn't impact string literals)
    Example: allow only a single space or newline with whitespace_pattern=r"[\n ]?"
outlines/fsm/json_schema.py
validate_quantifiers(min_bound, max_bound, start_offset=0)
+
+Ensures that the bounds of a number are valid. Bounds are used as quantifiers in the regex.
+min_bound + The minimum value that the number can take. +max_bound + The maximum value that the number can take. +start_offset + Number of elements that are already present in the regex but still need to be counted. + ex: if the regex is already "(-)?(0|[1-9][0-9])", we will always have at least 1 digit, so the start_offset is 1.
+min_bound + The minimum value that the number can take. +max_bound + The maximum value that the number can take.
+ValueError + If the minimum bound is greater than the maximum bound.
+TypeError or ValueError + If the minimum bound is not an integer or None. + or + If the maximum bound is not an integer or None.
+ +outlines/fsm/json_schema.py
TransformerTokenizer
+
+
+
+ Bases: Tokenizer
Represents a tokenizer for models in the transformers
library.
outlines/models/transformers.py
Transformers
+
+
+Represents a transformers
model.
outlines/models/transformers.py
forward(input_ids, attention_mask, past_key_values=None)
+
+Compute a forward pass through the transformer model.
+input_ids + The input token ids. Must be one or two dimensional. +attention_mask + The attention mask. Must be one or two dimensional. +past_key_values + A tuple of tuples containing the cached key and value tensors for each + attention head.
+The computed logits and the new cached key and value tensors.
+ +outlines/models/transformers.py
generate(prompts, generation_parameters, logits_processor, sampling_parameters)
+
+Generate text using transformers
.
prompts
+ A prompt or list of prompts.
+generation_parameters
+ An instance of GenerationParameters
that contains the prompt,
+ the maximum number of tokens, stop sequences and seed. All the
+ arguments to SequenceGeneratorAdapter
's __call__
method.
+logits_processor
+ The logits processor to use when generating text.
+sampling_parameters
+ An instance of SamplingParameters
, a dataclass that contains
+ the name of the sampler to use and related parameters as available
+ in Outlines.
The generated text
+ +outlines/models/transformers.py
stream(prompts, generation_parameters, logits_processor, sampling_parameters)
+
+Temporary stream stand-in which implements stream() signature +and equivalent behaviour but isn't yielded until generation completes.
+TODO: implement following completion of https://github.com/huggingface/transformers/issues/30810
+ +outlines/models/transformers.py
get_llama_tokenizer_types()
+
+Get all the Llama tokenizer types/classes that need work-arounds.
+When they can't be imported, a dummy class is created.
+ +outlines/models/transformers.py
transformers(model_name, device=None, model_kwargs={}, tokenizer_kwargs={}, model_class=None, tokenizer_class=None)
+
+Instantiate a model from the transformers
library and its tokenizer.
model_name
+ The name of the model as listed on Hugging Face's model page.
+device
+ The device(s) on which the model should be loaded. This overrides
+ the device_map
entry in model_kwargs
when provided.
+model_kwargs
+ A dictionary that contains the keyword arguments to pass to the
+ from_pretrained
method when loading the model.
+tokenizer_kwargs
+ A dictionary that contains the keyword arguments to pass to the
+ from_pretrained
method when loading the tokenizer.
A TransformersModel
model instance.
outlines/models/transformers.py
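A typical way to load a model with this helper and pass extra keyword arguments through to from_pretrained; the model id and the specific kwargs are illustrative choices, not requirements.

```python
import outlines

model = outlines.models.transformers(
    "microsoft/Phi-3-mini-4k-instruct",       # any Hugging Face model id
    device="cuda",                             # overrides device_map in model_kwargs
    model_kwargs={"torch_dtype": "auto"},
    tokenizer_kwargs={"padding_side": "left"},
)

generator = outlines.generate.text(model)
print(generator("Name three prime numbers:", max_tokens=20))
```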
Integration with OpenAI's API.
OpenAI
+
+
+An object that represents the OpenAI API.
+ +outlines/models/openai.py
__call__(prompt, max_tokens=None, stop_at=None, *, system_prompt=None, temperature=None, samples=None)
+
+Call the OpenAI API to generate text.
prompt
    A string or list of strings that will be used to prompt the model.
max_tokens
    The maximum number of tokens to generate.
stop_at
    A string or array of strings such that the generation stops when they are generated.
system_prompt
    The content of the system message that precedes the user's prompt.
temperature
    The value of the temperature used to sample tokens.
samples
    The number of completions to generate for each prompt.
+ +outlines/models/openai.py
__init__(client, config, system_prompt=None)
+
+Create an OpenAI
instance.
This class supports the standard OpenAI API, the Azure OpenAI API, as
well as compatible APIs that rely on the OpenAI client.
+client
+ An instance of the API's async client.
+config
+ An instance of OpenAIConfig
. Can be useful to specify some
+ parameters that cannot be set by calling this class' methods.
outlines/models/openai.py
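A minimal construction sketch; it assumes the async OpenAI client picks up OPENAI_API_KEY from the environment, that gpt-4o-mini is an acceptable model name, and that the wrapper runs the underlying async request when called synchronously, as the higher-level outlines.models.openai helper does.

```python
from openai import AsyncOpenAI

from outlines.models.openai import OpenAI, OpenAIConfig

client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
config = OpenAIConfig(model="gpt-4o-mini", temperature=0.5)
model = OpenAI(client, config, system_prompt="You answer in one sentence.")

answer = model("What is structured generation?", max_tokens=64)
print(answer)
```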
OpenAIConfig
+
+
+
+ dataclass
+
+
+Represents the parameters of the OpenAI API.
+The information was last fetched on 2023/11/20. We document below the +properties that are specific to the OpenAI API. Not all these properties are +supported by Outlines.
+model
+ The name of the model. Available models can be found on OpenAI's website.
frequency_penalty
    Number between -2.0 and 2.0. Positive values penalize new tokens based on
    their existing frequency in the text.
+logit_bias
+ Modifies the likelihood of specified tokens to appear in the completion.
+ Number between -100 (forbid) and +100 (only allows).
+n
+ The number of completions to return for each prompt.
+presence_penalty
+ Similar to frequency penalty.
+response_format
+ Specifies the format the model must output. {"type": "json_object"}
+ enables JSON mode.
+seed
+ Two completions with the same seed
value should return the same
+ completion. This is however not guaranteed.
+stop
+ Up to 4 words where the API will stop the completion.
+temperature
+ Number between 0 and 2. Higher values make the output more random, while
+ lower values make it more deterministic.
+top_p
+ Number between 0 and 1. Parameter for nucleus sampling.
+user
+ A unique identifier for the end-user.
outlines/models/openai.py
error_handler(api_call_fn)
+
+Handle OpenAI API errors and missing API key.
+ +outlines/models/openai.py
generate_chat(prompt, system_prompt, client, config)
+
+
+ async
+
+
+Call OpenAI's Chat Completion API.
+prompt
+ The prompt we use to start the generation. Passed to the model
+ with the "user" role.
+system_prompt
+ The system prompt, passed to the model with the "system" role
+ before the prompt.
+client
+ The API client
+config
+ An OpenAIConfig
instance.
A tuple that contains the model's response(s) and usage statistics.
+ +outlines/models/openai.py
PartialIndenter
+
+
+
+ Bases: Indenter
An Indenter
that doesn't reset its state every time process
is called.
outlines/fsm/parsing.py
PartialParserState
+
+
+
+ Bases: ParserState
outlines/fsm/parsing.py
accepts()
+
+Adapted from https://github.com/lark-parser/lark/blob/be542c2ff6d968817df019b8bf03f37b3111c08c/lark/parsers/lalr_interactive_parser.py#L95 +Returns the set of possible tokens that will advance the parser into a new valid state.
+ +outlines/fsm/parsing.py
feed_token_no_stack(token, is_end=False)
+
+This is a copy of ParserState.feed_token
with all the value stack
+steps removed. Since we're not exactly parsing in order to obtain a
+CST or anything similar, we can avoid the growing expense of tracking
+the parse tree.
outlines/fsm/parsing.py
PartialParsingFrontend
+
+
+
+ Bases: ParsingFrontend
outlines/fsm/parsing.py
PartialScanner
+
+
+
+ Bases: Scanner
outlines/fsm/parsing.py
get_terminals_info(fsm_state_seq)
+
+Get the possible terminal symbols for an FSM state sequence.
+ +outlines/fsm/parsing.py
match(text, pos, last_fsm_state_seq=None)
+
+Determine an FSM match over text
starting at pos
and continuing last_fsm_state_seq
.
outlines/fsm/parsing.py
terminals_to_fsms(lp)
+
+Construct a dict
mapping terminal symbol names to their finite state machines.
outlines/fsm/parsing.py
Prompt
+
+
+
+ dataclass
+
+
+Represents a prompt function.
+We return a Prompt
class instead of a simple function so the
+template defined in prompt functions can be accessed.
outlines/prompts.py
__call__(*args, **kwargs)
+
+Render and return the template.
+The rendered template as a Python str
.
outlines/prompts.py
get_fn_args(fn)
+
+Returns the arguments of a function with annotations and default values if provided.
+ +outlines/prompts.py
get_fn_description(fn)
+
+Returns the first line of a callable's docstring.
+ +outlines/prompts.py
get_fn_name(fn)
+
+Returns the name of a callable.
+ +outlines/prompts.py
get_fn_signature(fn)
+
+Return the signature of a callable.
+ +outlines/prompts.py
get_fn_source(fn)
+
+Return the source code of a callable.
+ +outlines/prompts.py
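These helpers are handy when a prompt template needs to describe a Python tool to the model. A small sketch follows; the search function is made up, and the exact string format returned by get_fn_signature is an assumption.

```python
from outlines.prompts import get_fn_description, get_fn_name, get_fn_signature


def search(query: str, max_results: int = 5):
    """Search the web and return the best matching pages."""
    ...


print(get_fn_name(search))         # "search"
print(get_fn_description(search))  # "Search the web and return the best matching pages."
print(get_fn_signature(search))    # the rendered argument list, e.g. "query: str, max_results: int = 5"
```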
get_schema_dict(model)
+
+get_schema_pydantic(model)
+
+Return the schema of a Pydantic model.
+ +outlines/prompts.py
parse_pydantic_schema(raw_schema, definitions)
+
+Parse the output of Basemodel.[schema|model_json_schema]()
.
This recursively follows the references to other schemas in case +of nested models. Other schemas are stored under the "definitions" +key in the schema of the top-level model.
+ +outlines/prompts.py
prompt(fn)
+
+Decorate a function that contains a prompt template.
This makes it possible to define prompts in the docstring of a function and simplifies their
manipulation by providing some degree of encapsulation. It uses the render
function internally to render templates.
>>> import outlines
>>>
>>> @outlines.prompt
... def build_prompt(question):
...    "I have a ${question}"
...
>>> prompt = build_prompt("How are you?")
This API can also be helpful in an "agent" context where parts of the prompt +are set when the agent is initialized and never modified later. In this situation +we can partially apply the prompt function at initialization.
>>> import outlines
>>> import functools as ft
>>>
>>> @outlines.prompt
... def solve_task(name: str, objective: str, task: str):
...     '''Your name is {{name}}.
...     Your overall objective is to {{objective}}.
...     Please solve the following task: {{task}}
...     '''
>>>
>>> hal = ft.partial(solve_task, "HAL", "Travel to Jupiter")
A Prompt
callable class which will render the template when called.
outlines/prompts.py
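Because the decorator returns a Prompt object rather than a plain function, the underlying template stays accessible. A short sketch:

```python
import outlines


@outlines.prompt
def greet(name):
    """Hello {{ name }}, welcome aboard!"""


print(greet("Ada"))    # the rendered prompt: "Hello Ada, welcome aboard!"
print(greet.template)  # the raw Jinja2 template defined in the docstring
```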
render(template, **values)
+
Parse a Jinja2 template and translate it into an Outlines graph.

This function removes extra whitespaces and linebreaks from templates to
allow users to enter prompts more naturally than if they used Python's
constructs directly. See the examples for a detailed explanation.

Outlines follows Jinja2's syntax
>>> import outlines
>>> outline = outlines.render("I like {{food}} and {{sport}}", food="tomatoes", sport="tennis")
I like tomatoes and tennis

If the first line of the template is empty, render removes it

>>> from outlines import render
>>>
>>> tpl = '''
... A new string'''
>>> tpl
... '\nA new string'
>>> render(tpl)
... 'a new string'

Similarly, render ignores linebreaks introduced by placing the closing quotes
underneath the text:

>>> tpl = '''
... A new string
... '''
>>> tpl
... '\nA new string\n'
>>> render(tpl)
... 'A new string'

If you want to insert a linebreak at the end of the rendered template, you will
need to leave an empty line at the end of the template:

>>> tpl = '''
... A new string
...
... '''
>>> tpl
... '\nA new string\n\n'
>>> render(tpl)
... 'A new string\n'

render removes the indentation in docstrings. This is particularly important
when using prompt functions

>>> tpl = '''
...    a string
...    and another string'''
>>> tpl
... '\n   a string\n   and another string'
>>> render(tpl)
... 'a string\nand another string'

The indentation of the first line is assumed to be the same as the second line's

>>> tpl = '''a string
...     and another'''
>>> tpl
... 'a string\n    and another'
>>> render(tpl)
... 'a string\nand another'

To get a different indentation for the first and the second line, we can start the
prompt on the string's second line:

>>> tpl = '''
... First line
...   Second line'''
>>> render(tpl)
... 'First Line\n  Second Line'

template
    A string that contains a template written with the Jinja2 syntax.
**values
    Map from the variables in the template to their value.

A string that contains the rendered template.
+ +outlines/prompts.py
regex(model, regex_str, sampler=multinomial())
+
+Generate structured text in the language of a regular expression.
+model:
+ An instance of Transformer
that represents a model from the
+ transformers
library.
+regex_str:
+ The regular expression that the output must follow.
+sampler:
+ The sampling algorithm to use to generate token ids from the logits
+ distribution.
A SequenceGeneratorAdapter
instance that generates text constrained by the
+regular expression.
outlines/generate/regex.py
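A usage sketch; the model id and the date pattern are illustrative.

```python
import outlines
from outlines.samplers import greedy

model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
generator = outlines.generate.regex(
    model,
    r"[0-9]{4}-[0-9]{2}-[0-9]{2}",  # ISO-style date
    sampler=greedy(),
)
date = generator("When did the French Revolution start? Answer with a date: ")
print(date)
```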
BeamSearchSampler
+
+
+Beam Search sampling algorithm.
+samples + The number of samples taken for each input sequence. Equivalent to the + number of beams.
+ +outlines/samplers.py
__call__(next_token_logits, sequence_weights, _)
+
+Call the beam search sampler.
+next_token_logits
+ A tensor of shape (n_seqs, vocab_size,)
that represents the
+ probability distribution of the next token over the vocabulary.
+sequence_weights
+ A tensor of shape (n_seqs,)
that represents the cumulative
+ weight of each sequence.
+rng
+ A random number generator.
A tuple with an array that contains the ids of the sampled tokens of
+shape (n_seqs, 1)
, an array that contains the ancestors of each
+sampled id of shape (n_seqs,)
and an array that contains the updated
+cumulative weights of each sequence of shape (n_seqs,)
.
outlines/samplers.py
GreedySampler
+
+
+Greedy Sampling algorithm.
+Greedy sampling consists in choosing the token with the largest +likelihood at every step.
We don't allow more than one sample. We could give this a meaning, for
instance that the k-th sample represents the k-th most likely token, in which
case it would be equivalent to beam search without the sequence weights.
+samples + The number of samples taken for each input sequence.
+ +outlines/samplers.py
__call__(next_token_logits, sequence_weights, _)
+
+Call the greedy sampler.
+next_token_logits
+ A tensor of shape (n_seqs, vocab_size,)
that represents the
+ probability distribution of the next token over the vocabulary.
+sequence_weights
+ A tensor of shape (n_seqs,)
that represents the cumulative
+ weight of each sequence.
+rng
+ A random number generator.
A tuple with an array that contains the ids of the sampled tokens of
+shape (n_seqs, 1)
, an array that contains the ancestors of each
+sampled id of shape (n_seqs,)
and an array that contains the updated
+cumulative weights of each sequence of shape (n_seqs,)
.
outlines/samplers.py
MultinomialSampler
+
+
+Multinomial sampling algorithm.
+Multinomial sampling consists in randomly sampling the next token assuming +its distribution is a Categorical distribution parametrized by the +next-token logits.
+samples + The number of samples taken for each input sequence.
+ +outlines/samplers.py
__call__(next_token_logits, sequence_weights, rng)
+
+Call the multinomial sampler.
+next_token_logits
+ A tensor of shape (n_seqs, vocab_size,)
that represents the
+ probability distribution of the next token over the vocabulary.
+sequence_weights
+ A tensor of shape (n_seqs,)
that represents the cumulative
+ weight of each sequence.
+rng
+ A random number generator.
A tuple with an array that contains the ids of the sampled tokens of
+shape (n_seqs, 1)
, an array that contains the ancestors of each
+sampled id of shape (n_seqs,)
and an array that contains the updated
+cumulative weights of each sequence of shape (n_seqs,)
.
outlines/samplers.py
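The three samplers are usually constructed through their lowercase aliases and handed to a generator. A sketch follows; the model id is illustrative, and the exact keyword arguments accepted by the multinomial and beam_search aliases are assumptions.

```python
import outlines
from outlines.samplers import beam_search, greedy, multinomial

model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")

# Three ways to pick the next token from the logits:
sampler = multinomial(3, top_k=40, temperature=0.7)  # 3 independent samples per prompt
# sampler = greedy()                                 # always take the most likely token
# sampler = beam_search(beams=5)                     # keep the 5 best sequences

generator = outlines.generate.text(model, sampler=sampler)
completions = generator("The capital of France is", max_tokens=5)
print(completions)
```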
keep_top_k_logits(k)
+
+Build a function that masks logits values smaller than the top k
ones.
k
+ The ranking below which logit values are replaced by -math.inf
.
outlines/samplers.py
keep_top_p_logits(p)
+
+Build a function that masks the lowest probability tokens whose +cumulative probability is below a certain threshold.
+p
+ The value of the threshold. We keep the highest probability tokens whose
+ cumulative distribution is greater than or equal to p
and mask the
+ others. Its value must be between 0 (excluded) and 1 (included).
outlines/samplers.py
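To make the behaviour concrete, here is a standalone sketch of the nucleus-masking idea in PyTorch; it illustrates the technique and is not the library's exact implementation.

```python
import torch


def mask_top_p(logits: torch.Tensor, p: float) -> torch.Tensor:
    """Keep the smallest set of highest-probability tokens whose cumulative
    probability reaches p, and set every other logit to -inf."""
    sorted_logits, sorted_idx = torch.sort(logits, descending=True, dim=-1)
    probs = torch.softmax(sorted_logits, dim=-1)
    cumulative = probs.cumsum(dim=-1)
    # Drop a token once the mass accumulated *before* it already reaches p,
    # which always keeps at least the single most likely token.
    drop = (cumulative - probs) >= p
    masked = sorted_logits.masked_fill(drop, float("-inf"))
    # Undo the sort so the mask lines up with the original vocabulary order.
    return torch.full_like(logits, float("-inf")).scatter(-1, sorted_idx, masked)


logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
print(mask_top_p(logits, p=0.8))  # only the two most likely tokens survive
```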
rescale_logits(temperature)
+
+Build a function that rescales the token probabilities exponentially.
+temperature + The value by which we rescale the logits.
+ +outlines/samplers.py