diff --git a/fern/definition/gdpr-privacy.yml b/fern/definition/gdpr-privacy.yml
new file mode 100644
index 0000000..909ed4d
--- /dev/null
+++ b/fern/definition/gdpr-privacy.yml
@@ -0,0 +1,83 @@
+docs: |
+
+service:
+ base-path: '/api/config/privacy'
+ auth: true
+ endpoints:
+ delete:
+ display-name: Delete specific user data
+ docs: |
+
+        You can delete trace data for a specific user by specifying their association properties.
+ path: "/data-deletion"
+ method: DELETE
+ request: DeleteUserData
+ response: DeleteUserDataResponse
+ examples:
+ - request: $DeleteUserData.ExampleDeleteUserData
+ response:
+ body: $DeleteUserDataResponse.ExampleDeleteUserDataResponse
+ get:
+ display-name: Status of user deletion request
+ docs: |
+      Track the progress of a user data deletion request.
+
+      Pass the `requestId` returned by the deletion request to check whether the process has completed and how many spans have been deleted so far.
+ path: "/data-deletion"
+ request:
+ name: GetDeletionStatusRequest
+ query-parameters:
+ requestId:
+ type: string
+ docs: |
+
+ The request ID from the user deletion request.
+ method: GET
+ response: GetDeletionStatusResponse
+
+types:
+
+ DeleteUserData:
+ properties:
+ associationProperties:
+ type: unknown
+ docs: |
+
+        A list of users to delete, each specified by an association property like `{ userId: "123" }`.
+ examples:
+ - name: ExampleDeleteUserData
+ value:
+ associationProperties: [
+ {
+ userId: "123"
+ }
+ ]
+ DeleteUserDataResponse:
+ properties:
+ requestId:
+ type: string
+ docs: |
+
+ The request ID for this deletion request. You can use it to query the status of the deletion.
+ examples:
+ - name: ExampleDeleteUserDataResponse
+ value:
+ requestId: "456"
+
+ GetDeletionStatusResponse:
+ properties:
+ completed:
+ type: boolean
+ docs: |
+
+ `true` if the process was completed, `false` otherwise.
+ deleted:
+ type: string
+ docs: |
+
+ The number of spans that were deleted.
+ total:
+ type: string
+ docs: |
+
+ The number of spans that need to be deleted in total.
\ No newline at end of file
diff --git a/fern/definition/tracing.yml b/fern/definition/tracing.yml
new file mode 100644
index 0000000..93b0333
--- /dev/null
+++ b/fern/definition/tracing.yml
@@ -0,0 +1,88 @@
+docs: |
+
+service:
+ base-path: '/api/config/pii'
+ auth: true
+ endpoints:
+ enable:
+ display-name: Enable logging of prompts and responses
+ docs: |
+ By default, all prompts and responses are logged.
+
+      If you’ve disabled this behavior by following [this guide](/docs/openllmetry/privacy/prompts-completions-and-embeddings), you can use this API to selectively enable it for some of your users.
+ path: "/tracing-allow-list"
+ method: POST
+ request: EnableLogging
+ examples:
+ - request: $EnableLogging.ExampleEnableLogging
+ get:
+ display-name: Get identifiers of users that are allowed to be logged
+ docs: |
+ By default, all prompts and responses are logged.
+
+ If you’ve disabled this behavior by following [this guide](/docs/openllmetry/privacy/prompts-completions-and-embeddings), and then selectively enabled it for some of your users, then you can use this API to view which users you’ve enabled.
+ path: "/tracing-allow-list"
+ method: GET
+ response: GetIdentifiersResponse
+ examples:
+ - response:
+ body: $GetIdentifiersResponse.ExampleGetIdentifiersResponse
+ disable:
+ display-name: Disable logging of prompts and responses for specific users
+ docs: |
+ By default, all prompts and responses are logged.
+
+ If you’ve disabled this behavior by following [this guide](/docs/openllmetry/privacy/prompts-completions-and-embeddings), and then selectively enabled it for some of your users, then you can use this API to disable it for previously enabled ones.
+ path: "/tracing-allow-list"
+ method: DELETE
+ request: DisableLogging
+ examples:
+ - request: $DisableLogging.ExampleDisableLogging
+
+types:
+
+ EnableLogging:
+ properties:
+ associationPropertyAllowList:
+ type: unknown
+ docs: |
+
+ The list of association properties (like `{ userId: "123" }`) that will be allowed to be logged.
+ examples:
+ - name: ExampleEnableLogging
+ value:
+        associationPropertyAllowList: [
+          {
+            userId: "123"
+          }
+        ]
+
+ GetIdentifiersResponse:
+ properties:
+ associationPropertyAllowList:
+ type: unknown
+ docs: |
+
+ The list of association properties (like `{ userId: "123" }`) that are allowed to be logged.
+ examples:
+ - name: ExampleGetIdentifiersResponse
+ value:
+ associationPropertyAllowList: [
+ {
+ userId: "123"
+ },
+ {
+ userId: "456",
+ chatId: "abc"
+ }
+ ]
+ DisableLogging:
+ properties:
+ associationProperty:
+ type: unknown
+ docs: |
+
+ A single association property (like `{ userId: "123" }`) that was previously allowed to be logged.
+ examples:
+ - name: ExampleDisableLogging
+ value:
+ associationProperty:
+ userId: "123"
\ No newline at end of file
diff --git a/fern/docs.yml b/fern/docs.yml
new file mode 100644
index 0000000..396f59e
--- /dev/null
+++ b/fern/docs.yml
@@ -0,0 +1,155 @@
+instances:
+ - url: traceloop.docs.buildwithfern.com/docs
+ custom-domain: fern.traceloop.com/docs
+
+title: Traceloop | Docs
+
+tabs:
+ docs:
+ display-name: Documentation
+ icon: "fa-duotone fa-book-open"
+ llmetry:
+ display-name: OpenLLMetry
+ icon: "fa-duotone fa-telescope"
+ slug: openllmetry
+ api:
+ display-name: Dashboard API
+ icon: "fa-duotone fa-webhook"
+
+navigation:
+ - tab: docs
+ layout:
+ - section: Learn
+ contents:
+ - page: Introduction
+ path: ./pages/documentation/learn/intro.mdx
+ - section: Prompt Management
+ contents:
+ - page: Quick Start
+ path: ./pages/documentation/prompt-management/quickstart.mdx
+ - page: Prompt Registry
+ path: ./pages/documentation/prompt-management/prompt-registry.mdx
+ - page: Fetching Prompts
+ path: ./pages/documentation/prompt-management/fetching-prompts.mdx
+ - tab: llmetry
+ layout:
+ - section: Introduction
+ contents:
+ - page: What is OpenLLMetry?
+ path: ./pages/openllmetry/intro/what-is-llmetry.mdx
+ - section: Quick Start
+ contents:
+ - page: Python
+ path: ./pages/openllmetry/quickstart/python.mdx
+ - page: Node.js
+ path: ./pages/openllmetry/quickstart/node.mdx
+ - page: Next.js
+ path: ./pages/openllmetry/quickstart/next.mdx
+ - page: Go
+ path: ./pages/openllmetry/quickstart/go.mdx
+ - page: Ruby
+ path: ./pages/openllmetry/quickstart/ruby.mdx
+ - page: SDK Initialization Options
+ path: ./pages/openllmetry/quickstart/sdk-initialization.mdx
+ - page: Troubleshooting
+ path: ./pages/openllmetry/quickstart/troubleshooting.mdx
+ - section: Tracing
+ contents:
+ - page: Workflows, Tasks, Agents, and Tools
+ path: ./pages/openllmetry/tracing/workflows-tasks-agents-tools.mdx
+ - page: Associating Entities with Traces
+ path: ./pages/openllmetry/tracing/entities-traces.mdx
+ - page: Tracking User Feedback
+ path: ./pages/openllmetry/tracing/tracking-feedback.mdx
+ - page: Manual Implementations (Typescript / Javascript)
+ path: ./pages/openllmetry/tracing/manual-implementations.mdx
+ - page: Usage with Threads (Python)
+ path: ./pages/openllmetry/tracing/usage-threads.mdx
+ - page: Without OpenLLMetry SDK
+ path: ./pages/openllmetry/tracing/without-sdk.mdx
+ - section: Integrations
+ contents:
+ - page: Overview
+ path: ./pages/openllmetry/integrations/overview.mdx
+ - page: Traceloop
+ path: ./pages/openllmetry/integrations/traceloop.mdx
+ - page: Axiom
+ path: ./pages/openllmetry/integrations/axiom.mdx
+      - page: Azure Application Insights
+ path: ./pages/openllmetry/integrations/azure-insights.mdx
+ - page: Datadog
+ path: ./pages/openllmetry/integrations/datadog.mdx
+ - page: Dynatrace
+ path: ./pages/openllmetry/integrations/dynatrace.mdx
+ - page: Grafana
+ path: ./pages/openllmetry/integrations/grafana.mdx
+ - page: Honeycomb
+ path: ./pages/openllmetry/integrations/honeycomb.mdx
+ - page: HyperDX
+ path: ./pages/openllmetry/integrations/hyperdx.mdx
+ - page: Instana
+ path: ./pages/openllmetry/integrations/instana.mdx
+ - page: New Relic
+ path: ./pages/openllmetry/integrations/new-relic.mdx
+ - page: OpenTelemetry Collector
+ path: ./pages/openllmetry/integrations/opentelemetry-collector.mdx
+      - page: ServiceNow Cloud Observability
+ path: ./pages/openllmetry/integrations/service-now.mdx
+ - page: SigNoz
+ path: ./pages/openllmetry/integrations/signoz.mdx
+ - page: Sentry
+ path: ./pages/openllmetry/integrations/sentry.mdx
+ - page: Splunk
+ path: ./pages/openllmetry/integrations/splunk.mdx
+ - section: Privacy
+ contents:
+ - page: Prompts, Completions, and Embeddings
+ path: ./pages/openllmetry/privacy/prompts-completions-embeddings.mdx
+ - page: Telemetry
+ path: ./pages/openllmetry/privacy/telemetry.mdx
+ - section: Contribute
+ contents:
+ - page: Overview
+ path: ./pages/openllmetry/contribute/overview.mdx
+ - page: Local Development
+ path: ./pages/openllmetry/contribute/local-development.mdx
+ - page: GenAI Semantic Conventions
+ path: ./pages/openllmetry/contribute/gen-ai.mdx
+ - tab: api
+ layout:
+ - section: API Reference
+ contents:
+ - page: Introduction
+ path: ./pages/api/intro.mdx
+ - api: Endpoints
+
+navbar-links:
+ - type: secondary
+ text: Community
+ url: https://traceloop.com/slack?_gl=1*898qql*_ga*NDc2NTEyODExLjE3MTAzMzg5OTc.*_ga_HPMNWFTJ5B*MTcxMDMzODk5Ni4xLjEuMTcxMDM1NjMyMy4wLjAuMA..
+ - type: secondary
+ text: Github
+ url: https://github.com/traceloop
+ - type: secondary
+ text: Website
+ url: https://www.traceloop.com/
+ - type: primary
+ text: Start Now
+ url: https://app.traceloop.com/
+
+layout:
+ searchbar-placement: header
+
+colors:
+ accentPrimary:
+ dark: "#ffb53d"
+ light: "#228BE6"
+ background:
+ light: "#ffffff"
+
+logo:
+ dark: ./assets/traceloop-dark.png
+ light: ./assets/traceloop-light.png
+ height: 24
+
+favicon: ./assets/favicon.png
diff --git a/fern/pages/documentation/learn/intro.mdx b/fern/pages/documentation/learn/intro.mdx
new file mode 100644
index 0000000..f789db9
--- /dev/null
+++ b/fern/pages/documentation/learn/intro.mdx
@@ -0,0 +1,53 @@
+---
+excerpt: Monitor, debug and test the quality of your LLM outputs
+---
+
+Traceloop automatically monitors the quality of your LLM outputs. It helps you to debug and test changes to your models and prompts.
+
+- Get real-time alerts about your model’s quality
+- Execution tracing for every request
+- Gradually roll out changes to models and prompts
+- Debug and re-run issues from production in your IDE
+
+Need help using Traceloop? Ping us at [dev@traceloop.com](mailto:dev@traceloop.com)
+
+## Get Started - Install OpenLLMetry SDK
+
+Traceloop natively plugs into OpenLLMetry SDK. To get started, pick the language you are using and follow the instructions.
+
+
+
+ Available
+
+
+ Available
+
+
+ Beta
+
+
+ Beta
+
+
+ In Development
+
+
+ In Development
+
+
diff --git a/fern/pages/documentation/prompt-management/fetching-prompts.mdx b/fern/pages/documentation/prompt-management/fetching-prompts.mdx
new file mode 100644
index 0000000..30b4235
--- /dev/null
+++ b/fern/pages/documentation/prompt-management/fetching-prompts.mdx
@@ -0,0 +1,76 @@
+---
+excerpt: Use your managed prompts with the Traceloop SDKs
+---
+
+## Using your prompt
+
+The SDK fetches your prompts from the Traceloop servers. Changes made to a prompt are available in the SDK at runtime. The SDK polls the Traceloop servers for changes at a regular poll interval.
+
+The default poll interval is 60 seconds, but it can be configured with the `TRACELOOP_SYNC_POLL_INTERVAL` environment variable or via the initialization function. In the `Development` environment, the poll interval is determined by the `TRACELOOP_SYNC_DEV_POLL_INTERVAL` environment variable or the corresponding initialization argument, and defaults to 5 seconds.
+
+To disable polling altogether, set the `TRACELOOP_SYNC_ENABLED` environment variable to false (it’s enabled by default).
+
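+As a minimal sketch (assuming these environment variables are read when the process starts), you could tune the sync behavior from Python like this:
+
+```python
+# A sketch: tune prompt sync via environment variables before initializing the SDK.
+# The values shown are illustrative; the defaults are 60s and 5s, as described above.
+import os
+
+os.environ["TRACELOOP_SYNC_POLL_INTERVAL"] = "120"      # production poll interval, in seconds
+os.environ["TRACELOOP_SYNC_DEV_POLL_INTERVAL"] = "10"   # Development environment, in seconds
+os.environ["TRACELOOP_SYNC_ENABLED"] = "true"           # set to "false" to disable polling
+
+from traceloop.sdk import Traceloop
+
+Traceloop.init()
+```
+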
+Make sure you’ve configured the SDK with the right environment and API Key. See the [SDK documentation](/docs/openllmetry/integrations/traceloop) for more information.
+
+
+  The SDK uses smart caching mechanisms to provide zero latency for fetching
+ prompts.
+
+
+## Get Prompt API
+
+Let’s say you’ve created a prompt with a key `joke_generator` in the UI and set it to:
+
+```
+Tell me a joke about OpenTelemetry as a {{persona}}
+```
+
+Then, you can retrieve it in your code using `get_prompt`:
+
+
+
+ ```python
+  import os
+
+  from openai import OpenAI
+ from traceloop.sdk.prompts import get_prompt
+
+ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+ prompt_args = get_prompt(key="joke_generator", variables={"persona": "pirate"})
+ completion = client.chat.completions.create(**prompt_args)
+ ```
+
+
+ ```javascript
+ import * as traceloop from "@traceloop/node-server-sdk";
+
+ const prompt = traceloop.getPrompt("joke_generator", { persona: "pirate" });
+ const chatCompletion = await openai.chat.completions.create(prompt);
+ ```
+
+
+ ```golang
+  import (
+    "context"
+    "fmt"
+    "os"
+
+    "github.com/sashabaranov/go-openai"
+  )
+
+ func call_llm() {
+ // traceloop is the object you got when you initialized the SDK
+ request, err := traceloop.GetOpenAIChatCompletionRequest("joke_generator", map[string]interface{}{ "persona": "pirate" })
+ if err != nil {
+ fmt.Printf("GetOpenAIChatCompletionRequest error: %v\n", err)
+ return
+ }
+ client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
+ resp, err := client.CreateChatCompletion(
+ context.Background(),
+ *request,
+    )
+    if err != nil {
+      fmt.Printf("CreateChatCompletion error: %v\n", err)
+      return
+    }
+
+    fmt.Println(resp.Choices[0].Message.Content)
+  }
+ ```
+
+
+
+
+
+  The returned variable `prompt_args` is compatible with the API used by the
+  foundation model SDKs (OpenAI, Anthropic, etc.), which means you can plug the
+  response directly into the appropriate API call.
+
diff --git a/fern/pages/documentation/prompt-management/prompt-registry.mdx b/fern/pages/documentation/prompt-management/prompt-registry.mdx
new file mode 100644
index 0000000..6b55a59
--- /dev/null
+++ b/fern/pages/documentation/prompt-management/prompt-registry.mdx
@@ -0,0 +1,71 @@
+---
+excerpt: Manage your prompts on the Traceloop platform
+---
+
+Traceloop’s Prompt Registry is where you manage your prompts. You can create, edit, evaluate and deploy prompts to your environments.
+
+## Configuring Prompts
+
+
+ ![prompt-management-prompt-registry](https://fern-image-hosting.s3.amazonaws.com/traceloop/prompt-management-prompt-registry.png)
+
+
+The prompt configuration is composed of two parts:
+
+- The prompt template (system and/or user prompts)
+- The model configuration (`temperature`, `top_p`, etc.)
+
+
+ Your prompt template can include variables. Variables are defined according to
+  the syntax of the parser specified. For example, if using `jinja2` the syntax
+ will be `{{ variable_name }}`. You can then pass variable values to the SDK
+ when calling `get_prompt`. See the example on the [SDK
+ Usage](/fetching-prompts) section.
+
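+For illustration only, here is how a `jinja2`-style template resolves a variable (this uses the `jinja2` library directly and is independent of the Traceloop platform):
+
+```python
+# Illustration of jinja2 variable substitution. When the jinja2 parser is selected,
+# your prompt template variables are resolved similarly from the values you pass
+# to get_prompt.
+from jinja2 import Template
+
+template = Template("Tell me a joke about OpenTelemetry as a {{ persona }}")
+print(template.render(persona="pirate"))
+# Tell me a joke about OpenTelemetry as a pirate
+```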
+
+Initially, prompts are created in `Draft Mode`. In this mode, you can make changes to the prompt and configuration. You can also test your prompt in the playground (see below).
+
+## Testing a Prompt Configuration (Prompt Playground)
+
+By using the prompt playground you can iterate and refine your prompt before deploying it.
+
+
+ ![prompt-management-prompt-registry-testing](https://fern-image-hosting.s3.amazonaws.com/traceloop/prompt-management-prompt-registry-testing.png)
+
+
+Simply click on the `Test` button in the playground tab at the bottom of the screen.
+
+If your prompt includes variables, then you need to define values for them before testing. Choose `Variables` in the right sidebar and assign a value to each.
+
+Once you click the `Test` button your prompt template will be rendered with the values you provided and will be sent to the configured LLM with the model configuration defined. The completion response (including token usage) will be displayed in the playground.
+
+## Deploying Prompts
+
+
+ ![prompt-management-prompt-registry-deploying](https://fern-image-hosting.s3.amazonaws.com/traceloop/prompt-management-prompt-registry-deploying.png)
+
+
+Draft mode prompts can only be deployed to the `development` environment.
+
+Once you are satisfied with the prompt, you can publish it and make it available to deploy in all environments. Once published, the prompt version cannot be edited anymore.
+
+Choose the `Deploy` Tab to navigate to the deployments page for your prompt.
+
+Here, you can see all recent prompt versions, and which environments they are deployed to. Simply click on the `Deploy` button to deploy a prompt version to an environment. Similarly, click `Rollback` to revert to a previous prompt version for a specific environment.
+
+
+ As a safeguard, you cannot deploy a prompt to the `Staging` environment before
+ first deploying it to `Development`. Similarly, you cannot deploy to
+ `Production` without first deploying to `Staging`.
+
+
+To fetch prompts from a specific environment, you must supply that environment’s API key to the Traceloop SDK. See the [SDK Configuration](/docs/openllmetry/integrations/traceloop) for details.
+
+## Prompt Versions
+
+If you want to make changes to your prompt after deployment, simply create a new version by clicking on the `New Version` button. New versions will be created in `Draft Mode`.
+
+
+ If you change the names of variables or add/remove existing variables, you
+ will be required to create a new prompt.
+
diff --git a/fern/pages/documentation/prompt-management/quickstart.mdx b/fern/pages/documentation/prompt-management/quickstart.mdx
new file mode 100644
index 0000000..ef0d1e8
--- /dev/null
+++ b/fern/pages/documentation/prompt-management/quickstart.mdx
@@ -0,0 +1,107 @@
+
+ ![prompt-management-quickstart](https://fern-image-hosting.s3.amazonaws.com/traceloop/prompt-management-quickstart.png)
+
+
+You can use Traceloop to manage your prompts and model configurations. That way you can easily experiment with different prompts, and roll out changes gradually and safely.
+
+
+ Make sure you’ve created an API key and set it as an environment variable
+ `TRACELOOP_API_KEY` before you start. Check out the SDK’s [getting started
+ guide](/docs/openllmetry/quick-start/python) for more information.
+
+
+
+
+### Create a new prompt
+
+Click **New Prompt** to create a new prompt. Give it a name, which will be used to retrieve it in your code later.
+
+### Define it in the Prompt Registry
+
+Set the system and/or user prompt. You can use variables in your prompt by following the [Jinja format](https://jinja.palletsprojects.com/en/3.1.x/templates/) of `{{ variable_name }}`. The values of these variables will be passed in when you retrieve the prompt in your code.
+
+For more information see the [Registry Documentation](/prompt-registry).
+
+
+ This screen is also a prompt playground. Give the prompt a try by clicking
+ **Test** at the bottom.
+
+
+### Deploy the prompt to your development environment
+
+Click **Deploy to Dev** to deploy the prompt to your development environment.
+
+### Use the prompt in your code
+
+If you haven’t done so, make sure to [generate an API key](https://app.traceloop.com/settings/api-keys) and set it as an environment variable `TRACELOOP_API_KEY`.
+
+Make sure to initialize the SDK. On Typescript/Javascript, you should also wait for the initialization to complete.
+
+
+
+ ```python
+ from traceloop.sdk import Traceloop
+
+ Traceloop.init()
+ ```
+
+
+ ```javascript
+ import * as traceloop from "@traceloop/node-server-sdk";
+
+ traceloop.initialize();
+ await traceloop.waitForInitialization();
+ ```
+
+
+
+
+Retrieve your prompt by using the `get_prompt` function. For example, if you’ve created a prompt with the key `joke_generator` and a single variable `persona`:
+
+
+
+  ```python
+  import os
+
+  from openai import OpenAI
+  from traceloop.sdk.prompts import get_prompt
+
+  client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+  prompt_args = get_prompt(key="joke_generator", variables={"persona": "pirate"})
+  completion = client.chat.completions.create(**prompt_args)
+  ```
+
+
+  ```javascript
+  import * as traceloop from "@traceloop/node-server-sdk";
+
+  const prompt = traceloop.getPrompt("joke_generator", { persona: "pirate" });
+  const chatCompletion = await openai.chat.completions.create(prompt);
+  ```
+
+
+ ```golang
+  import (
+    "context"
+    "fmt"
+    "os"
+
+    "github.com/sashabaranov/go-openai"
+  )
+
+ func call_llm() {
+ // traceloop is the object you got when you initialized the SDK
+ request, err := traceloop.GetOpenAIChatCompletionRequest("joke_generator", map[string]interface{}{ "persona": "pirate" })
+ if err != nil {
+ fmt.Printf("GetOpenAIChatCompletionRequest error: %v\n", err)
+ return
+ }
+ client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
+ resp, err := client.CreateChatCompletion(
+ context.Background(),
+ *request,
+    )
+    if err != nil {
+      fmt.Printf("CreateChatCompletion error: %v\n", err)
+      return
+    }
+
+    fmt.Println(resp.Choices[0].Message.Content)
+  }
+ ```
+
+
+
+
+
+  The returned variable `prompt_args` is compatible with the API used by the
+  foundation model SDKs (OpenAI, Anthropic, etc.), which means you can plug the
+  response directly into the appropriate API call.
+
+
+
+
+For more information see the [SDK Usage Documentation](/fetching-prompts).
diff --git a/fern/pages/openllmetry/integrations/opentelemetry-collector.mdx b/fern/pages/openllmetry/integrations/opentelemetry-collector.mdx
new file mode 100644
index 0000000..d190a19
--- /dev/null
+++ b/fern/pages/openllmetry/integrations/opentelemetry-collector.mdx
@@ -0,0 +1,11 @@
+---
+excerpt: LLM observability with OpenTelemetry Collector
+---
+
+Since Traceloop emits standard OTLP over HTTP (the standard OpenTelemetry protocol), you can use any OpenTelemetry Collector, which gives you the flexibility to connect to any backend you want. First, [deploy an OpenTelemetry Collector](https://opentelemetry.io/docs/kubernetes/operator/automatic/#create-an-opentelemetry-collector-optional) in your cluster. Then, point the output of the Traceloop SDK to the collector by setting:
+
+```
+TRACELOOP_BASE_URL=https://:4318
+```
+
+You can connect your collector to Traceloop by following the instructions in the [Traceloop integration section](/docs/openllmetry/integrations/traceloop#using-an-opentelemetry-collector).
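+
+If you prefer configuring the endpoint in code rather than through the environment variable, a minimal sketch using the Python SDK’s `api_endpoint` initialization option (the collector hostname below is a placeholder) would be:
+
+```python
+# Sketch: point the SDK at your collector's OTLP/HTTP endpoint instead of api.traceloop.com.
+# "my-otel-collector" is a placeholder for the collector service you deployed above.
+from traceloop.sdk import Traceloop
+
+Traceloop.init(api_endpoint="http://my-otel-collector:4318")
+```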
diff --git a/fern/pages/openllmetry/integrations/overview.mdx b/fern/pages/openllmetry/integrations/overview.mdx
new file mode 100644
index 0000000..b966986
--- /dev/null
+++ b/fern/pages/openllmetry/integrations/overview.mdx
@@ -0,0 +1,34 @@
+---
+excerpt: Connect to any observability platform - Traceloop, Dynatrace, Datadog, Honeycomb, and others
+---
+
+Since the Traceloop SDK uses OpenTelemetry under the hood, you can see everything in any observability platform that supports OpenTelemetry.
+
+## The Integrations Catalog
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/fern/pages/openllmetry/integrations/traceloop.mdx b/fern/pages/openllmetry/integrations/traceloop.mdx
new file mode 100644
index 0000000..4005336
--- /dev/null
+++ b/fern/pages/openllmetry/integrations/traceloop.mdx
@@ -0,0 +1,62 @@
+---
+excerpt: LLM Observability with Traceloop
+---
+
+
+ ![integrations-traceloop](https://fern-image-hosting.s3.amazonaws.com/traceloop/integrations-traceloop.png)
+
+
+[Traceloop](https://app.traceloop.com/) is a platform for observability and evaluation of LLM outputs. It allows you to deploy changes to prompts and model configurations with confidence, without breaking existing functionality.
+
+
+
+ On Traceloop, API keys can be generated from the [Traceloop Dashboard](https://app.traceloop.com/settings/api-keys), for each of the three supported environments (Development, Staging, Production).
+
+
+ ### Go to [Traceloop Environments Management](https://app.traceloop.com/settings/api-keys)
+
+ You can also reach here by clicking on **Environments** on the left-hand navigation bar.
+
+ ### Click on **Generate API Key**
+
+ ### Click **Copy Key** to copy the API key
+
+
+  API keys are only displayed once, at the time of their creation, and are not stored anywhere. If you lose your API key, you will need to revoke the old one and generate a new one.
+
+
+ ### Set the API key as an environment variable named `TRACELOOP_API_KEY`.
+
+
+
+Done! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard.
+
+
+
+ If you are using an [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/), you can route metrics and traces to Traceloop by simply adding an OTLP exporter to your collector configuration.
+
+```yaml
+receivers:
+ otlp:
+ protocols:
+ http:
+ endpoint: 0.0.0.0:4318
+processors:
+ batch:
+exporters:
+ otlp/traceloop:
+ endpoint: "api.traceloop.com" # US instance
+ headers:
+ "Authorization": "Bearer "
+service:
+ pipelines:
+ traces:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp/traceloop]
+```
+
+You can route OpenLLMetry to your collector by following the [OpenTelemetry Collector](/docs/openllmetry/integrations/opentelemetry-collector) integration instructions.
+
+
+
diff --git a/fern/pages/openllmetry/intro/what-is-llmetry.mdx b/fern/pages/openllmetry/intro/what-is-llmetry.mdx
new file mode 100644
index 0000000..abf4095
--- /dev/null
+++ b/fern/pages/openllmetry/intro/what-is-llmetry.mdx
@@ -0,0 +1,100 @@
+
+ ![openllmetry-intro](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-intro.png)
+
+
+OpenLLMetry is an open source project that allows you to easily start monitoring and debugging the execution of your LLM app. Tracing is done in a non-intrusive way, built on top of OpenTelemetry. You can choose to export the traces to Traceloop, or to your existing observability stack.
+
+
+ You can use OpenLLMetry whether you use a framework like LangChain, or
+ directly interact with a foundation model API.
+
+
+
+
+ ```python
+  import os
+
+  from openai import OpenAI
+ from traceloop.sdk import Traceloop
+ from traceloop.sdk.decorators import workflow
+
+ Traceloop.init(app_name="joke_generation_service")
+
+ @workflow(name="joke_creation")
+ def create_joke():
+ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+ completion = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[{"role": "user", "content": "Tell me a joke about opentelemetry"}],
+ )
+
+ return completion.choices[0].message.content
+ ```
+
+
+ ```typescript
+  import * as traceloop from "@traceloop/node-server-sdk";
+  import OpenAI from "openai";
+
+  traceloop.initialize({ appName: "joke_generation_service" });
+  const openai = new OpenAI();
+
+  class MyLLM {
+    @traceloop.workflow({ name: "joke_creation" })
+    async createJoke() {
+      const completion = await openai.chat.completions.create({
+        model: "gpt-3.5-turbo",
+        messages: [{ role: "user", content: "Tell me a joke about opentelemetry" }],
+      });
+
+      return completion.choices[0].message.content;
+    }
+  }
+ ```
+
+
+
+
+## Getting Started
+
+
+
+ Set up Traceloop Python SDK in your project
+
+
+ Set up Traceloop Javascript SDK in your project
+
+
+ Set up Traceloop Go SDK in your project
+
+
+ Learn how to annotate your code to enrich your traces
+
+
+ Learn how to connect to your existing observability stack
+
+
+ How we secure your data
+
+
diff --git a/fern/pages/openllmetry/privacy/prompts-completions-embeddings.mdx b/fern/pages/openllmetry/privacy/prompts-completions-embeddings.mdx
new file mode 100644
index 0000000..ef9d5eb
--- /dev/null
+++ b/fern/pages/openllmetry/privacy/prompts-completions-embeddings.mdx
@@ -0,0 +1,81 @@
+**By default, OpenLLMetry logs prompts, completions, and embeddings to span attributes.** This gives you clear visibility into how your LLM application is working, and makes it easy to debug and evaluate the quality of its outputs.
+
+However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
+
+## Disabling logging globally
+
+To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to false. On Typescript/Javascript you can also pass the `traceContent` option.
+
+
+
+ ```
+ TRACELOOP_TRACE_CONTENT=false
+ ```
+
+
+ ```javascript
+ Traceloop.initialize({ traceContent: false });
+ ```
+
+
+
+OpenLLMetry SDK, as well as all individual instrumentations will respect this setting.
+
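+For example, a minimal sketch of setting this from Python code (set the variable before initializing the SDK so it is picked up):
+
+```python
+# Sketch: disable content tracing globally by setting the environment variable
+# before the SDK is initialized.
+import os
+
+os.environ["TRACELOOP_TRACE_CONTENT"] = "false"
+
+from traceloop.sdk import Traceloop
+
+Traceloop.init()
+```
+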
+## Enabling logging selectively in specific workflows / tasks
+
+You can decide to selectively enable prompt logging for specific workflows, tasks, agents, or tools, using the annotations API. If you don’t specify a `traceContent` option, the global setting will be used.
+
+
+
+ ```typescript
+ return await traceloop.withWorkflow(
+ { name: "workflow_name", traceContent: false },
+ async () => {
+ // ...
+ }
+ );
+ ```
+
+
+ ```typescript
+ class MyClass {
+ @traceloop.workflow({ traceContent: false })
+ async some_workflow() {
+ // ...
+ }
+ }
+ ```
+
+
+
+## Enabling logging selectively for specific users
+
+You can decide to selectively enable or disable prompt logging for specific users or workflows.
+
+
+
+ We have an API to enable content tracing for specific users, as defined by [association entities](/docs/openllmetry/tracing/associating-entities-with-traces). See the [Traceloop API documentation](/dashboard-api/endpoints) for more information.
+
+
+ Set a key called `override_enable_content_tracing` in the OpenTelemetry context to `True` right before making the LLM call you want to trace with prompts. This will create a new context that will instruct instrumentations to log prompts and completions as span attributes.
+
+
+ ```python
+ from opentelemetry.context import attach, set_value
+
+ attach(set_value("override_enable_content_tracing", True))
+ ```
+
+
+ Make sure to also disable it afterwards:
+
+
+ ```python
+ from opentelemetry.context import attach, set_value
+
+ attach(set_value("override_enable_content_tracing", False))
+ ```
+
+
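+A possible convenience wrapper (a sketch, not part of the OpenLLMetry SDK) is a context manager that enables content tracing around a single call and switches it off afterwards:
+
+```python
+# Sketch: enable content tracing for one LLM call, then switch it back off,
+# using the override key described above.
+from contextlib import contextmanager
+
+from opentelemetry.context import attach, set_value
+
+@contextmanager
+def content_tracing_enabled():
+    attach(set_value("override_enable_content_tracing", True))
+    try:
+        yield
+    finally:
+        attach(set_value("override_enable_content_tracing", False))
+
+# Usage:
+# with content_tracing_enabled():
+#     completion = client.chat.completions.create(...)
+```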
+
+
diff --git a/fern/pages/openllmetry/quickstart/next.mdx b/fern/pages/openllmetry/quickstart/next.mdx
new file mode 100644
index 0000000..13260b2
--- /dev/null
+++ b/fern/pages/openllmetry/quickstart/next.mdx
@@ -0,0 +1,211 @@
+---
+excerpt: Install OpenLLMetry for Next.js by following these 3 easy steps and get instant monitoring.
+---
+
+You can check out our full working example with Next.js 13 [here](https://github.com/traceloop/openllmetry-nextjs-demo).
+
+
+ ### Install the SDK
+
+Run the following command in your terminal:
+
+{" "}
+
+
+  ```bash
+  npm install @traceloop/node-server-sdk
+  ```
+
+
+  ```bash
+  pnpm add @traceloop/node-server-sdk
+  ```
+
+
+  ```bash
+  yarn add @traceloop/node-server-sdk
+  ```
+
+
+
+
+
+ Create a file named `instrumentation.ts` in the root of your project (i.e., outside of the `pages` or `app` directory) and add the following code:
+
+ ```javascript
+ export async function register() {
+ if (process.env.NEXT_RUNTIME === "nodejs") {
+ await import("./instrumentation.node.ts");
+ }
+ }
+ ```
+
+ Create a file named `instrumentation.node.ts` in the root of your project and add the following code:
+
+ ```javascript
+ import * as traceloop from "@traceloop/node-server-sdk";
+ import OpenAI from "openai";
+ // Make sure to import the entire module you want to instrument, like this:
+ // import * as LlamaIndex from "llamaindex";
+
+ traceloop.initialize({
+ appName: "app",
+ disableBatch: true,
+ instrumentModules: {
+ openAI: OpenAI,
+ // Add any other modules you'd like to instrument here
+ // for example:
+ // llamaIndex: LlamaIndex,
+ },
+ });
+ ```
+
+
+  Make sure to explicitly pass any LLM modules you want to instrument, as otherwise auto-instrumentation won’t work on Next.js. Also make sure to set `disableBatch` to `true`.
+
+
+ On Next.js v12 and below, you’ll also need to add the following to your `next.config.js`:
+
+ ```javascript
+ /** @type {import('next').NextConfig} */
+ const nextConfig = {
+ experimental: {
+ instrumentationHook: true,
+ },
+ };
+
+ module.exports = nextConfig;
+ ```
+
+
+ See official Next.js [OpenTelemetry docs](https://nextjs.org/docs/pages/building-your-application/optimizing/open-telemetry) for more information.
+
+
+
+
+
+ Install the following packages by running the following commands in your terminal:
+
+
+
+ ```bash
+ npm install --save-dev node-loader
+ npm i supports-color@8.1.1
+ ```
+
+
+ ```bash
+ pnpm add -D node-loader
+ pnpm add supports-color@8.1.1
+ ```
+
+
+ ```bash
+ yarn add -D node-loader
+ yarn add supports-color@8.1.1
+ ```
+
+
+
+ Edit your `next.config.js` file and add the following webpack configuration:
+
+ ```javascript
+ const nextConfig = {
+ webpack: (config, { isServer }) => {
+ config.module.rules.push({
+ test: /\.node$/,
+ loader: "node-loader",
+ });
+ if (isServer) {
+ config.ignoreWarnings = [{ module: /opentelemetry/ }];
+ }
+ return config;
+ },
+ };
+ ```
+
+ On every app API route you want to instrument, add the following code at the top of the file:
+
+ ```javascript
+ import * as traceloop from "@traceloop/node-server-sdk";
+ import OpenAI from "openai";
+ // Make sure to import the entire module you want to instrument, like this:
+ // import * as LlamaIndex from "llamaindex";
+
+ traceloop.initialize({
+ appName: "app",
+ disableBatch: true,
+ instrumentModules: {
+ openAI: OpenAI,
+ // Add any other modules you'd like to instrument here
+ // for example:
+ // llamaIndex: LlamaIndex,
+ },
+ });
+ ```
+
+
+ See official Next.js [OpenTelemetry docs](https://nextjs.org/docs/pages/building-your-application/optimizing/open-telemetry) for more information.
+
+
+
+
+
+### Annotate your workflows
+
+{" "}
+
+ ![openllmetry-next](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-next.png)
+
+
+If you have complex workflows or chains, you can annotate them to get a better understanding of what’s going on. You’ll see the complete trace of your workflow on Traceloop or any other dashboard you’re using.
+
+We have a set of [methods and decorators](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools) to make this easier. If you have a function that renders a prompt and calls an LLM, simply wrap it in a `withWorkflow()` function call.
+
+We also have compatible Typescript decorators for class methods which are more convenient.
+
+{" "}
+
+ If you’re using an LLM framework like Haystack, Langchain or LlamaIndex -
+ we’ll do that for you. No need to add any annotations to your code.
+
+
+
+
+ ```javascript
+ async function suggestAnswers(question: string) {
+ return await withWorkflow({ name: "suggestAnswers" }, () => {
+ // your code here...
+ });
+ }
+ ```
+
+
+ ```javascript
+ class MyLLM {
+ @traceloop.workflow({ name: "suggest_answers" })
+ async suggestAnswers(question: string) {
+ // your code here...
+ }
+ }
+ ```
+
+
+
+For more information, see the [dedicated section in the docs](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools).
+
+### Configure Trace Exporting
+
+Lastly, you’ll need to configure where to export your traces. The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`.
+
+For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview).
+
+### Using Traceloop Cloud
+
+Go to [Traceloop](https://app.traceloop.com/), and create a new account. Then, click on **Environments** on the left-hand navigation bar. Or go directly to https://app.traceloop.com/settings/api-keys. Click **Generate API Key** to generate an API key for the development environment and click **Copy API Key** to copy it over.
+
+{" "}
+
+ Make sure to copy it as it won’t be shown again.
+
+
+Set the copied API key as an environment variable named `TRACELOOP_API_KEY` in your app.
+
+
+ ![openllmetry-next-2](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-next-2.png)
+
+
diff --git a/fern/pages/openllmetry/quickstart/node.mdx b/fern/pages/openllmetry/quickstart/node.mdx
new file mode 100644
index 0000000..74aca94
--- /dev/null
+++ b/fern/pages/openllmetry/quickstart/node.mdx
@@ -0,0 +1,107 @@
+---
+excerpt: Install OpenLLMetry for Node.js by following these 3 easy steps and get instant monitoring.
+---
+
+
+ If you’re on Next.js, follow the [Next.js guide](/next).
+
+
+
+### Install the SDK
+Run the following command in your terminal:
+
+
+
+  ```bash
+  npm install @traceloop/node-server-sdk
+  ```
+
+
+  ```bash
+  pnpm add @traceloop/node-server-sdk
+  ```
+
+
+  ```bash
+  yarn add @traceloop/node-server-sdk
+  ```
+
+
+
+In your LLM app, initialize the Traceloop tracer like this:
+
+```javascript
+import * as traceloop from "@traceloop/node-server-sdk";
+
+traceloop.initialize();
+```
+
+
+ Because of the way Javascript works, you must import the Traceloop SDK before
+ importing any LLM module like OpenAI.
+
+
+If you’re running this locally, you may want to disable batch sending, so you can see the traces immediately:
+
+```javascript
+traceloop.initialize({ disableBatch: true });
+```
+
+### Annotate your workflows
+
+
+ ![openllmetry-node](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-node.png)
+
+
+If you have complex workflows or chains, you can annotate them to get a better understanding of what’s going on. You’ll see the complete trace of your workflow on Traceloop or any other dashboard you’re using.
+
+We have a set of [methods and decorators](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools) to make this easier. If you have a function that renders a prompt and calls an LLM, simply wrap it in a `withWorkflow()` function call.
+
+We also have compatible Typescript decorators for class methods which are more convenient.
+
+
+ If you’re using an LLM framework like Haystack, Langchain or LlamaIndex -
+ we’ll do that for you. No need to add any annotations to your code.
+
+
+
+
+ ```javascript
+ async function suggestAnswers(question: string) {
+ return await withWorkflow({ name: "suggestAnswers" }, () => {
+ // your code here...
+ });
+ }
+ ```
+
+
+ ```javascript
+ class MyLLM {
+ @traceloop.workflow({ name: "suggest_answers" })
+ async suggestAnswers(question: string) {
+ // your code here...
+ }
+ }
+ ```
+
+
+
+For more information, see the [dedicated section in the docs](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools).
+
+### Configure Trace Exporting
+
+Lastly, you’ll need to configure where to export your traces. The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`.
+
+For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview).
+
+### Using Traceloop Cloud
+
+Go to [Traceloop](https://app.traceloop.com/), and create a new account. Then, click on **Environments** on the left-hand navigation bar. Or go directly to https://app.traceloop.com/settings/api-keys. Click **Generate API Key** to generate an API key for the development environment and click **Copy API Key** to copy it over.
+
+
+ Make sure to copy it as it won’t be shown again.
+
+
+
+ ![openllmetry-node-2](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-node-2.png)
+
+
+Set the copied API key as an environment variable named `TRACELOOP_API_KEY` in your app.
+
+
+
+You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard.
diff --git a/fern/pages/openllmetry/quickstart/python.mdx b/fern/pages/openllmetry/quickstart/python.mdx
new file mode 100644
index 0000000..4442d7b
--- /dev/null
+++ b/fern/pages/openllmetry/quickstart/python.mdx
@@ -0,0 +1,80 @@
+---
+excerpt: Install OpenLLMetry for Python by following these 3 easy steps and get instant monitoring.
+---
+
+You can also check out our full working example of a RAG pipeline with Pinecone [here](https://github.com/traceloop/pinecone-demo).
+
+
+
+### Install the SDK
+
+Run the following command in your terminal:
+
+
+  ```bash
+  pip install traceloop-sdk
+  ```
+
+  ```bash
+  poetry add traceloop-sdk
+  ```
+
+
+In your LLM app, initialize the Traceloop tracer like this:
+
+```python
+from traceloop.sdk import Traceloop
+
+Traceloop.init()
+```
+
+If you’re running this locally, you may want to disable batch sending, so you can see the traces immediately:
+
+```python
+Traceloop.init(disable_batch=True)
+```
+
+### Annotate your workflows
+
+
+ ![openllmetry-python](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-python.png)
+
+
+If you have complex workflows or chains, you can annotate them to get a better understanding of what’s going on. You’ll see the complete trace of your workflow on Traceloop or any other dashboard you’re using.
+
+We have a set of [decorators](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools) to make this easier. If you have a function that renders a prompt and calls an LLM, simply add `@workflow` (or, for asynchronous methods, `@aworkflow`).
+
+
+ If you’re using an LLM framework like Haystack, Langchain or LlamaIndex -
+ we’ll do that for you. No need to add any annotations to your code.
+
+
+```python
+from traceloop.sdk.decorators import workflow
+
+@workflow(name="suggest_answers")
+def suggest_answers(question: str):
+    ...
+```
+
+For more information, see the [dedicated section in the docs](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools).
+
+### Configure trace exporting
+
+Lastly, you’ll need to configure where to export your traces. The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`.
+
+For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview).
+
+### Using Traceloop Cloud
+
+Go to [Traceloop](https://app.traceloop.com/), and create a new account. Then, click on **Environments** on the left-hand navigation bar. Or go directly to https://app.traceloop.com/settings/api-keys. Click **Generate API Key** to generate an API key for the development environment and click **Copy API Key** to copy it over.
+
+
+ Make sure to copy it as it won’t be shown again.
+
+
+
+ ![openllmetry-python-2](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-python-2.png)
+
+
+Set the copied API key as an environment variable named `TRACELOOP_API_KEY` in your app.
+
+
+
+You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard.
diff --git a/fern/pages/openllmetry/quickstart/ruby.mdx b/fern/pages/openllmetry/quickstart/ruby.mdx
new file mode 100644
index 0000000..dbe8e32
--- /dev/null
+++ b/fern/pages/openllmetry/quickstart/ruby.mdx
@@ -0,0 +1,86 @@
+---
+excerpt: Install OpenLLMetry for Ruby by following these 3 easy steps and get instant monitoring.
+---
+
+
+ This is still in beta. Give us feedback at
+ [dev@traceloop.com](mailto:dev@traceloop.com)
+
+
+
+### Install the SDK
+
+Run the following command in your terminal:
+
+
+  ```bash
+  gem install traceloop-sdk
+  ```
+
+  ```bash
+  bundle add traceloop-sdk
+  ```
+
+
+In your LLM app, initialize the Traceloop tracer like this:
+
+```Ruby
+require "traceloop/sdk"
+
+traceloop = Traceloop::SDK::Traceloop.new
+```
+
+
+ If you’re using Rails, this needs to be in `config/initializers/traceloop.rb`
+
+
+### Log your prompts
+
+
+ ![openllmetry-ruby](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-ruby.png)
+
+
+For now, we don’t automatically instrument libraries on Ruby (as opposed to Python and Javascript). This will change in later versions.
+
+This means that you’ll need to manually log your prompts and completions.
+
+```ruby
+require "openai"
+
+client = OpenAI::Client.new
+
+# This tracks the latency of the call and the response
+traceloop.llm_call() do |tracer|
+ # Log the prompt
+ tracer.log_prompt(model="gpt-3.5-turbo", user_prompt="Tell me a joke about OpenTelemetry")
+
+ # Call OpenAI like you normally would
+ response = client.chat(
+ parameters: {
+ model: "gpt-3.5-turbo",
+ messages: [{ role: "user", content: "Tell me a joke about OpenTelemetry" }]
+ })
+
+  # Pass the response from OpenAI as-is to log the completion and token usage
+ tracer.log_response(response)
+end
+```
+
+### Configure Trace Exporting
+
+Lastly, you’ll need to configure where to export your traces. The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`.
+
+For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview).
+
+### Using Traceloop Cloud
+
+Go to [Traceloop](https://app.traceloop.com/), and create a new account. Then, click on **Environments** on the left-hand navigation bar. Or go directly to https://app.traceloop.com/settings/api-keys. Click **Generate API Key** to generate an API key for the development environment and click **Copy API Key** to copy it over.
+
+
+ Make sure to copy it as it won’t be shown again.
+
+
+
+ ![openllmetry-ruby-2](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-ruby-2.png)
+
+
+Set the copied API key as an environment variable named `TRACELOOP_API_KEY` in your app.
+
+
+
+You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard.
diff --git a/fern/pages/openllmetry/quickstart/sdk-initialization.mdx b/fern/pages/openllmetry/quickstart/sdk-initialization.mdx
new file mode 100644
index 0000000..2b9c4c9
--- /dev/null
+++ b/fern/pages/openllmetry/quickstart/sdk-initialization.mdx
@@ -0,0 +1,249 @@
+---
+excerpt: Documentation of the initialization options for the SDKs.
+---
+
+Most configuration options can be set via environment variables or via the SDK’s initialization options.
+
+
+ The SDK initialization options always take precedence over the environment
+ variables.
+
+
+## Application Name
+
+You can customize the application name that will be logged with the traces. This is useful to identify if you have multiple services with OpenLLMetry installed.
+
+
+
+ ```python
+ Traceloop.init(app_name="my app name")
+ ```
+
+
+ ```typescript
+ Traceloop.initialize({ appName: "my app name" });
+ ```
+
+
+
+## Resource Attributes
+
+You can further add any custom attributes to the OpenTelemetry resource. This is useful to add information about the environment where the application is running, such as the environment name, the application version, etc.
+
+
+```python
+Traceloop.init(resource_attributes={"env": "prod", "version": "1.0.0"})
+```
+
+
+## Base URL
+
+This defines the OpenTelemetry endpoint to connect to. It defaults to https://api.traceloop.com.
+
+If you prefix it with `http` or `https`, it will use the OTLP/HTTP protocol. Otherwise, it will use the OTLP/GRPC protocol.
+
+To configure this for a different observability platform, check out our [integrations section](/docs/openllmetry/integrations/overview).
+
+
+ The OpenTelemetry standard defines that the actual endpoint should always end
+ with `/v1/traces`. Thus, if you specify a base URL, we always append
+  `/v1/traces` to it. This is similar to how `OTEL_EXPORTER_OTLP_ENDPOINT` works
+ in all OpenTelemetry SDKs.
+
+
+
+
+ ```
+ TRACELOOP_BASE_URL=
+ ```
+
+
+ ```python
+ Traceloop.init(api_endpoint=)
+ ```
+
+
+ ```typescript
+ Traceloop.initialize({ baseUrl: })
+ ```
+
+
+
+## API Key
+
+If set, this is sent as a bearer token on the Authorization header.
+
+[Traceloop](/docs/openllmetry/integrations/traceloop), for example, uses this to authenticate incoming traces and requests.
+
+
+ If this is not set, and the base URL is set to `https://api.traceloop.com`,
+ the SDK will generate a new API key automatically with the Traceloop
+ dashboard.
+
+
+
+
+ ```
+ TRACELOOP_API_KEY=
+ ```
+
+
+ ```python
+ Traceloop.init(api_key=)
+ ```
+
+
+ ```typescript
+ Traceloop.initialize({ apiKey: })
+ ```
+
+
+
+## Headers
+
+If set, this is sent as-is as HTTP headers. This is useful for custom authentication protocols that some observability platforms require. The format follows the [W3C Correlation-Context format](https://github.com/w3c/baggage/blob/main/baggage/HTTP_HEADER_FORMAT.md), i.e. `key1=value1, key2=value2`. If you need spaces, use `%20`. This is similar to how `OTEL_EXPORTER_OTLP_HEADERS` works in all OpenTelemetry SDKs.
+
+If this is set, the API key is ignored.
+
+
+
+ ```
+ TRACELOOP_HEADERS=key1=value1,key2=value2
+ ```
+
+
+ ```python
+ Traceloop.init(headers={"key1": "value1", "key2": "value2"})
+ ```
+
+
+ ```javascript
+ Traceloop.initialize({ headers: { key1: "value1", key2: "value2" } });
+ ```
+
+
+
+## Custom Traces Exporter
+
+If, for some reason, you cannot use the OTLP/HTTP or OTLP/GRPC exporter that is provided with the SDK, you can set a custom exporter (for example, to Jaeger, Zipkin, or others).
+
+
+ If this is set, Base URL, API key and headers configurations are ignored.
+
+
+
+
+ ```python
+  from opentelemetry.exporter.zipkin.json import ZipkinExporter
+
+  Traceloop.init(exporter=ZipkinExporter(endpoint="http://localhost:9411/api/v2/spans"))
+ ```
+
+
+ ```javascript
+ Traceloop.initialize({ exporter: new ZipkinExporter() });
+ ```
+
+
+
+## Disable Batch
+
+By default, the SDK batches spans using the [OpenTelemetry batch span processor](https://github.com/open-telemetry/opentelemetry-collector/blob/main/processor/batchprocessor/README.md). When working locally, you may sometimes wish to disable this behavior. You can do that with this flag.
+
+
+
+ ```python
+ Traceloop.init(disable_batch=True)
+ ```
+
+
+ ```javascript
+ Traceloop.initialize({ disableBatch: true });
+ ```
+
+
+
+## Disable Tracing of Prompt Content
+
+By default, OpenLLMetry logs prompts, completions, and embeddings to span attributes.
+
+However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
+
+
+
+ ```
+ TRACELOOP_TRACE_CONTENT=false
+ ```
+
+
+ ```javascript
+ Traceloop.initialize({ traceContent: false });
+ ```
+
+
+
+## Disable Console Logging
+
+By default, the SDK outputs some debug logs to the console. You can disable this behavior with this flag.
+
+
+
+ ```python
+ Traceloop.init(suppress_logs=True)
+ ```
+
+
+ ```javascript
+ Traceloop.initialize({ suppressLogs: true });
+ ```
+
+
+
+## Traceloop Sync
+
+By default, if you’re sending traces to Traceloop, then the Traceloop SDK’s sync functionality is also active. To disable it or change any defaults, see the example below. The values listed are the default values, so you don’t need to set them unless you want to change the defaults.
+
+
+ Traceloop Sync must be enabled in order to use the prompt registry.
+
+
+
+
+ ```
+ TRACELOOP_SYNC_ENABLED=true
+ TRACELOOP_SYNC_MAX_RETRIES=3
+ TRACELOOP_SYNC_POLLING_INTERVAL=60 # seconds
+ TRACELOOP_SYNC_DEV_POLLING_INTERVAL=5 # seconds
+ ```
+
+
+ ```python
+ Traceloop.init(traceloop_sync_enabled=True)
+ ```
+
+
+ ```javascript
+ Traceloop.initialize({
+ traceloopSyncEnabled: true,
+ traceloopSyncMaxRetries: 3,
+ traceloopSyncPollingInterval: 60, // in seconds
+ traceloopSyncDevPollingInterval: 5, // in seconds
+ });
+ ```
+
+
+
+## Instrumentations
+
+By default, the SDK automatically detects which models and frameworks you are using and instruments them for you. You can override this and specify specific frameworks and models you want to instrument. This, for example, allows you to specify that you want to log calls to OpenAI, but not Anthropic, or vice-versa.
+
+
+```python
+from traceloop.sdk.instruments import Instruments
+
+Traceloop.init(instruments={Instruments.OPENAI, Instruments.PINECONE})
+
+```
+
diff --git a/fern/pages/openllmetry/quickstart/troubleshooting.mdx b/fern/pages/openllmetry/quickstart/troubleshooting.mdx
new file mode 100644
index 0000000..41bd010
--- /dev/null
+++ b/fern/pages/openllmetry/quickstart/troubleshooting.mdx
@@ -0,0 +1,96 @@
+---
+excerpt: Not seeing anything? Here are some things to check.
+---
+
+
+ ![openllmetry-troubleshooting](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-troubleshooting.png)
+
+
+We’ve all been there. You followed all the instructions, but you’re not seeing any traces. Let’s fix this.
+
+
+
+### Disable batch sending
+
+Sending traces in batch is useful in production, but can be confusing if you’re working locally. Make sure you’ve [disabled batch sending](/docs/openllmetry/quick-start/sdk-initialization-options#disable-batch).
+
+
+
+ ```python
+ Traceloop.init(disable_batch=True)
+ ```
+
+
+ ```javascript
+  Traceloop.initialize({ disableBatch: true });
+ ```
+
+
+
+### Check the logs
+
+When Traceloop initializes, it logs a message to the console, specifying the endpoint that it uses. If you don’t see that, you might not be initializing the SDK properly.
+
+
+ Traceloop exporting traces to `https://api.traceloop.com`
+
+
+### (TS/JS only) Fix known instrumentation issues
+
+If you’re using Typescript or Javascript, make sure to import traceloop before any other LLM libraries. This is because traceloop needs to instrument the libraries you’re using, and it can only do that if it’s imported first.
+
+```javascript
+import * as traceloop from "@traceloop/node-server-sdk";
+import OpenAI from "openai";
+// ...
+```
+
+If that doesn’t work, you may need to manually instrument the libraries you’re using. See the [manual instrumentation guide](/docs/openllmetry/tracing/manual-implementations-typescript-javascript) for more details.
+
+```javascript
+import * as traceloop from "@traceloop/node-server-sdk";
+import OpenAI from "openai";
+import * as LlamaIndex from "llamaindex";
+
+traceloop.initialize({
+ appName: "app",
+ instrumentModules: {
+ openAI: OpenAI,
+ llamaIndex: LlamaIndex,
+ // Add or omit other modules you'd like to instrument
+ },
+});
+```
+
+### Is your library supported yet?
+
+Check out [OpenLLMetry](https://github.com/traceloop/openllmetry#readme) or [OpenLLMetry-JS](https://github.com/traceloop/openllmetry-js#readme) README files to see which libraries and versions are currently supported. Contributions are always welcome! If you want to add support for a library, please open a PR.
+
+### Try outputting traces to the console
+
+Use the `ConsoleExporter` and check if you see traces in the console.
+
+
+
+ ```python
+ from opentelemetry.sdk.trace.export import ConsoleSpanExporter
+
+ Traceloop.init(exporter=ConsoleSpanExporter())
+ ```
+
+
+ ```javascript
+ import { ConsoleSpanExporter } from "@opentelemetry/sdk-trace-node";
+
+ traceloop.initialize({ exporter: new ConsoleSpanExporter() });
+ ```
+
+
+
+
+If you see traces in the console, then you probably haven’t configured the exporter properly. Check the [integration guide](/docs/openllmetry/integrations/overview) again, and make sure you’re using the right endpoint and API key.
+
+### Talk to us!
+
+We’re here to help. Reach out any time over [Slack](https://traceloop.com/slack?_gl=1*42xay7*_ga*NDc2NTEyODExLjE3MTAzMzg5OTc.*_ga_HPMNWFTJ5B*MTcxMDMzODk5Ni4xLjEuMTcxMDM0OTA2NS4wLjAuMA..), [email](mailto:dev@traceloop.com), and we’d love to assist you.
+
+
diff --git a/fern/pages/openllmetry/tracing/tracking-feedback.mdx b/fern/pages/openllmetry/tracing/tracking-feedback.mdx
new file mode 100644
index 0000000..9758a42
--- /dev/null
+++ b/fern/pages/openllmetry/tracing/tracking-feedback.mdx
@@ -0,0 +1,28 @@
+When building LLM applications, it quickly becomes highly useful and important to track user feedback on the result of your LLM workflow.
+
+Doing that with OpenLLMetry is easy. First, make sure you [associate your LLM workflow with unique identifiers](/docs/openllmetry/tracing/associating-entities-with-traces).
+
+Then, you can simply log user feedback by calling our Python or Typescript SDK. Feedback scores are always between -1 and 1, where -1 is the worst possible feedback and 1 is the best possible feedback.
+
+For example, if you provide your users with a thumbs-up / thumbs-down feedback, you should log thumbs-up as 1 and thumbs-down as -1.
+
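+As a concrete sketch, a thumbs-up / thumbs-down handler could map the UI event onto the Python call shown below:
+
+```python
+# Sketch: convert a thumbs-up / thumbs-down event into a score and report it
+# against the chat_id association property (see the SDK calls below).
+from traceloop.sdk import Traceloop
+
+def report_thumb_feedback(chat_id: str, thumbs_up: bool) -> None:
+    score = 1 if thumbs_up else -1
+    Traceloop.report_score("chat_id", chat_id, score)
+
+report_thumb_feedback("12345", thumbs_up=True)
+```
+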
+
+You can only report feedback for one association property at a time. So this call will throw an exception: `traceloop.reportScore({ chat_id: "12345", generation_id: "789" }, 1);`
+
+
+
+
+ ```python
+ from traceloop.sdk import Traceloop
+
+ Traceloop.report_score("chat_id", "12345", 1)
+ ```
+
+
+
+ ```typescript
+ traceloop.reportScore({ chat_id: "12345" }, 1);
+ ```
+
+
+
diff --git a/openllmetry/getting-started-go.mdx b/openllmetry/getting-started-go.mdx
index 4396149..2521e18 100644
--- a/openllmetry/getting-started-go.mdx
+++ b/openllmetry/getting-started-go.mdx
@@ -33,7 +33,11 @@ func main() {
+<<<<<<< Updated upstream:openllmetry/getting-started-go.mdx
+=======
+ ![openllmetry-go](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-go.png)
+>>>>>>> Stashed changes:fern/pages/openllmetry/quickstart/go.mdx
For now, we don't automatically instrument libraries on Go (as opposed to Python and Javascript).
This will change in later versions.
@@ -104,7 +108,11 @@ func call_llm() {
Lastly, you'll need to configure where to export your traces.
The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`.
-For Traceloop, read on. For other options, see [Exporting](/openllmetry/integrations/introduction).
+For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview).
### Using Traceloop Cloud
@@ -112,10 +120,19 @@ Go to [Traceloop](https://app.traceloop.com), and create a new account.
Then, click on **Environments** on the left-hand navigation bar, or go directly to https://app.traceloop.com/settings/api-keys.
Click **Generate API Key** to generate an API key for the development environment and click **Copy API Key** to copy it over.
-Make sure to copy it as it won't be shown again.
+
+ Make sure to copy it as it won’t be shown again.
+
+ ![openllmetry-go-2](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-go-2.png)
Set the API key you copied as an environment variable named `TRACELOOP_API_KEY` in your app.
diff --git a/openllmetry/tracing/annotations.mdx b/openllmetry/tracing/annotations.mdx
index 54223d2..af18899 100644
--- a/openllmetry/tracing/annotations.mdx
+++ b/openllmetry/tracing/annotations.mdx
@@ -4,16 +4,28 @@ description: "Enrich your traces by annotating chains and workflows in your app"
---
+ ![tracing-workflows-header](https://fern-image-hosting.s3.amazonaws.com/traceloop/tracing-workflows-header.png)
+
Traceloop SDK supports several ways to annotate workflows, tasks, agents and tools in your code to get a more complete picture of your app structure.
-If you're using a framework like Langchain, Haystack or LlamaIndex - no need
-to do anything! OpenLLMetry will automatically detect the framework and
-annotate your traces.
+
+ If you’re using a framework like Langchain, Haystack or LlamaIndex - no need
+ to do anything! OpenLLMetry will automatically detect the framework and
+ annotate your traces.
+
## Workflows and Tasks
@@ -24,10 +36,17 @@ Sometimes called a "chain", intended for a multi-step process that can be traced
Use it as `@workflow(name="my_workflow")` or `@task(name="my_task")`.
-The `name` argument is optional. If you don't provide it, we will use the
-function name as the workflow or task name.
+
+ The `name` argument is optional. If you don’t provide it, we will use the
+ function name as the workflow or task name.
+
```python
from openai import OpenAI
@@ -61,6 +80,13 @@ def joke_workflow():
signature = generate_signature(pirate_joke)
print(pirate_joke + "\n\n" + signature)
```
+
+Use it as `withWorkflow("my_workflow", {}, () => ...)` or `withTask("my_task", () => ...)`. The function passed to `withWorkflow` or `withTask` will be part of the workflow or task and can be async or sync.
@@ -172,7 +198,70 @@ async function joke_workflow() {
);
}
```
+
+This feature is only available in Typescript. Unless you’re on Nest.js, you’ll need to update your `tsconfig.json` to enable decorators.
+
+
+Update `tsconfig.json` to enable decorators:
+
+```json
+{
+ "compilerOptions": {
+ "experimentalDecorators": true
+ }
+}
+```
+
+Use `@traceloop.workflow({ name: "my_workflow" })` in your code, on class methods only. We will consider every call to OpenAI as a distinct step (or task). You can even annotate the task with a name, using `@traceloop.task({ name: "my_task" })`.
+
+
+ The `name` argument is optional. If you don’t provide it, we will use the
+ function name as the workflow or task name.
+
+
+```typescript
+import * as traceloop from "@traceloop/node-server-sdk";
+import OpenAI from "openai";
+
+const openai = new OpenAI();
+
+class JokeCreation {
+  @traceloop.task({ name: "joke_creation" })
+  async create_joke() {
+    const completion = await openai.chat.completions.create({
+      model: "gpt-3.5-turbo",
+      messages: [
+        { role: "user", content: "Tell me a joke about opentelemetry" },
+      ],
+    });
+
+    return completion.choices[0].message.content;
+  }
+
+  @traceloop.task({ name: "signature_generation" })
+  async generate_signature(joke: string) {
+    const completion = await openai.completions.create({
+      model: "davinci-002",
+      prompt: "add a signature to the joke:\n\n" + joke,
+    });
+
+    return completion.choices[0].text;
+  }
+
+  @traceloop.workflow({ name: "pirate_joke_generator" })
+  async joke_workflow() {
+    const eng_joke = await this.create_joke();
+    // translate_joke_to_pirate is defined in the agents example below
+    const pirate_joke = await translate_joke_to_pirate(eng_joke);
+    const signature = await this.generate_signature(pirate_joke);
+    console.log(pirate_joke + "\n\n" + signature);
+  }
+}
+```
@@ -279,7 +368,49 @@ async function history_jokes_tool() {
}
}
```
+
+Similarly, if you use autonomous agents, you can use the `@agent` decorator to trace them as a single unit. Each tool should be marked with `@tool`.
+
+
+ If you’re not on Nest.js, remember to set `experimentalDecorators` to `true`
+ in your `tsconfig.json`.
+
+
+```typescript
+import * as traceloop from "@traceloop/node-server-sdk";
+import OpenAI from "openai";
+
+const openai = new OpenAI();
+
+class Agent {
+  @traceloop.agent({ name: "joke_translation" })
+  async translate_joke_to_pirate(joke: string) {
+    const completion = await openai.chat.completions.create({
+      model: "gpt-3.5-turbo",
+      messages: [
+        {
+          role: "user",
+          content: `Translate the below joke to pirate-like english:\n\n${joke}`,
+        },
+      ],
+    });
+
+    await this.history_jokes_tool();
+
+    return completion.choices[0].message.content;
+  }
+
+  @traceloop.tool({ name: "history_jokes" })
+  async history_jokes_tool() {
+    const completion = await openai.chat.completions.create({
+      model: "gpt-3.5-turbo",
+      messages: [{ role: "user", content: "get some history jokes" }],
+    });
+
+    return completion.choices[0].message.content;
+  }
+}
+```
@@ -290,7 +421,11 @@ In Typescript, you can use the same syntax for async methods.
In Python, you'll need to switch to an equivalent async decorator.
So, if you're decorating an `async` method, use `@aworkflow`, `@atask` and so forth.
-See also a [separate section on using threads in Python with OpenLLMetry](/openllmetry/tracing/python-threads).
+See also a separate section on [using threads in Python with OpenLLMetry](/docs/openllmetry/tracing/usage-with-threads-python).
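+
+For example, a minimal sketch of an async task and workflow using the async decorators mentioned above (the import path for `atask`/`aworkflow` is an assumption here; adjust it to match your SDK version):
+
+```python
+import asyncio
+from openai import AsyncOpenAI
+# Assumption: the async decorators live alongside the sync ones in traceloop.sdk.decorators.
+from traceloop.sdk.decorators import atask, aworkflow
+
+client = AsyncOpenAI()
+
+@atask(name="joke_creation")
+async def create_joke():
+    completion = await client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Tell me a joke about opentelemetry"}],
+    )
+    return completion.choices[0].message.content
+
+@aworkflow(name="joke_generator")
+async def joke_workflow():
+    print(await create_joke())
+
+asyncio.run(joke_workflow())
+```
+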
## Decorating Classes (Python only)
@@ -305,12 +440,18 @@ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
@agent(name="base_joke_generator", method_name="generate_joke")
class JokeAgent:
- def generate_joke(self):
- completion = client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[{"role": "user", "content": "Tell me a joke about Traceloop"}],
- )
+    def generate_joke(self):
+        completion = client.chat.completions.create(
+            model="gpt-3.5-turbo",
+            messages=[{"role": "user", "content": "Tell me a joke about Traceloop"}],
+        )
return completion.choices[0].message.content
```
-````