diff --git a/fern/docs/pages/llm-evaluation/prompt-optimization/using-prompts.mdx b/fern/docs/pages/llm-evaluation/prompt-optimization/using-prompts.mdx
index 9b2ff02..39f7ec6 100644
--- a/fern/docs/pages/llm-evaluation/prompt-optimization/using-prompts.mdx
+++ b/fern/docs/pages/llm-evaluation/prompt-optimization/using-prompts.mdx
@@ -329,3 +329,76 @@ curl -X POST "https://api.openai.com/v1/chat/completions" \
+
+## Logging Prompts in Traces
+
+
+
+
+
+Attach the `@observe` decorator to functions/methods that make up your agent, and specify type `llm` for your LLM-calling functions.
+
+```python main.py {4}
+from deepeval.tracing import observe
+
+@observe(type="llm", model="gpt-4.1")
+def your_llm_component():
+ ...
+```
+
+
+ Specifying the type is necessary because logging prompts is only available for
+ LLM spans.
+
+
+
+
+
+
+Pull and interpolate the prompt version to use it for LLM generation.
+
+```python main.py {8,9}
+from deepeval.tracing import observe
+from deepeval.prompt import Prompt
+from openai import OpenAI
+
+@observe(type="llm", model="gpt-4.1")
+def your_llm_component():
+ prompt = Prompt(alias="YOUR-PROMPT-ALIAS")
+ prompt.pull()
+ interpolated_prompt = prompt.interpolate(name="Joe")
+ response = OpenAI().chat.completions.create(model="gpt-4o-mini", messages=[{"role": "user", "content": interpolated_prompt}])
+ return response.choices[0].message.content
+```
+
+
+
+
+
+Then simply provide the prompt to the `update_llm_span` function.
+
+```python main.py {11}
+from deepeval.tracing import observe, update_llm_span
+from deepeval.prompt import Prompt
+from openai import OpenAI
+
+@observe(type="llm", model="gpt-4.1")
+def your_llm_component():
+ prompt = Prompt(alias="YOUR-PROMPT-ALIAS")
+ prompt.pull()
+ interpolated_prompt = prompt.interpolate(name="Joe")
+ response = OpenAI().chat.completions.create(model="gpt-4o-mini", messages=[{"role": "user", "content": interpolated_prompt}])
+ update_llm_span(prompt=prompt)
+ return response.choices[0].message.content
+```
+
+
+ Remember to pull the prompt before updating the span, otherwise the prompt
+ will not be logged.
+
+
+This will automatically attribute the prompt used to the LLM span.
+
+
+
+
diff --git a/fern/docs/pages/llm-tracing/advanced-features/attributes.mdx b/fern/docs/pages/llm-tracing/advanced-features/attributes.mdx
index 16e50b6..33a1914 100644
--- a/fern/docs/pages/llm-tracing/advanced-features/attributes.mdx
+++ b/fern/docs/pages/llm-tracing/advanced-features/attributes.mdx
@@ -8,7 +8,9 @@ slug: llm-tracing/advanced-features/attributes
Attributes are like [`metadata`](/docs/llm-tracing/advanced-features/metadata) but specific to different span `type`s.
-Setting attributes will make the tracing UI easier to navigate on Confident AI, but is by no means required. You also cannot set attributes for custom [span types](/docs/llm-tracing/advanced-features/span-types).
+ Setting attributes will make the tracing UI easier to navigate on Confident
+ AI, but is by no means required. You also cannot set attributes for custom
+ [span types](/docs/llm-tracing/advanced-features/span-types).
## Set Attributes At Runtime
@@ -16,14 +18,12 @@ Setting attributes will make the tracing UI easier to navigate on Confident AI,
Attributes are set at runtime using update functions specific to each span type:
-
- - `update_llm_span`
- - `update_retriever_span`
-
-
- - `updateLlmSpan`
- - `updateRetrieverSpan`
-
+
+ - `update_llm_span` - `update_retriever_span`
+
+
+ - `updateLlmSpan` - `updateRetrieverSpan`
+
These functions update the attributes for the **CURRENT** span of the component to which the `@observe` decorator is applied. For example, `update_retriever_span` will update the attributes for `inner_function`.
@@ -81,13 +81,17 @@ LLM attributes track the model, prompt, and token usage and costs of language mo
- ```python title="main.py" {6}
+ ```python title="main.py" {9}
from deepeval.tracing import observe, update_llm_span
+ from deepeval.prompt import Prompt
@observe(type="llm", model="gpt-4.1")
- def generate_response(prompt):
- output = "Generated response to: " + prompt
+ def generate_response(input):
+ prompt = Prompt(alias="My Prompt")
+ prompt.pull("00.00.01")
+ output = "Generated response to: " + input
update_llm_span(
+ prompt=prompt,
input_token_count=10,
output_token_count=25,
cost_per_input_token=0.01,
@@ -99,6 +102,7 @@ LLM attributes track the model, prompt, and token usage and costs of language mo
-There are **SIX** optional parameters for `update_llm_span`:
+There are **SEVEN** optional parameters for `update_llm_span`:
- [Optional] `model`: The model used, of type `str`.
+ - [Optional] `prompt`: The prompt of type `Prompt`, which must be pulled prior to updating the span.
- [Optional] `input_token_count`: The number of tokens of type `float` in the input.
- [Optional] `output_token_count`: The number of tokens of type `float` in the generated response.
- [Optional] `cost_per_input_token`: The cost per input token of type `float`.
@@ -135,9 +139,9 @@ LLM attributes track the model, prompt, and token usage and costs of language mo
-
-The model and per-token costs can be set in the `@observe` decorator, but will be overridden if set in `update_llm_span`.
+ The model and per-token costs can be set in the `@observe` decorator, but will
+ be overridden if set in `update_llm_span`.
### Retriever attributes
@@ -170,17 +174,17 @@ Retriever attributes track the `embedder`, `top_k`, and `chunk_size` in RAG pipe
```typescript title="index.ts" {5}
import { observe, updateRetrieverSpan } from '@deepeval-ts/tracing';
-
+
const retrieveDocuments = (query: string): string[] => {
const fetchedDocuments = ["doc1", "doc2"];
- updateRetrieverSpan({
- embedder: "text-embedding-ada-002",
- chunkSize: 10,
- topK: 5
+ updateRetrieverSpan({
+ embedder: "text-embedding-ada-002",
+ chunkSize: 10,
+ topK: 5
});
return fetchedDocuments;
};
-
+
const observeDocuments = observe({ type: "retriever", embedder: "text-embedding-ada-002", fn: retrieveDocuments });
```
@@ -191,4 +195,5 @@ Retriever attributes track the `embedder`, `top_k`, and `chunk_size` in RAG pipe
- [Optional] `topK`: The number of text chunks retrieved of type `int` from the vector store.
+
diff --git a/fern/openapi.yaml b/fern/openapi.yaml
index 968f05e..2f4ffb0 100644
--- a/fern/openapi.yaml
+++ b/fern/openapi.yaml
@@ -2124,6 +2124,11 @@ components:
model:
type: string
description: This is the LLM model used in the span.
+ prompt:
+   type: object
+   description: This is the prompt object used in the span.
+   allOf:
+     - $ref: "#/components/schemas/PromptApi"
costPerInputToken:
type: number
format: float
@@ -2141,6 +2146,16 @@ components:
required:
- model
+ PromptApi:
+ type: object
+ properties:
+ alias:
+ type: string
+ description: This is the alias of the prompt.
+ version:
+ type: string
+ description: This is the version of the prompt.
+
RetrieverSpan:
type: object
properties: