import Image from '@theme/IdealImage';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

# Langsmith - Logging LLM Input/Output

## Quick Start
Use just 2 lines of code to instantly log your responses **across all providers** with Langsmith.

<Tabs>
<TabItem value="python" label="SDK">

```python
litellm.callbacks = ["langsmith"]
```

```python
import litellm
import os

os.environ["LANGSMITH_API_KEY"] = ""
os.environ["LANGSMITH_DEFAULT_RUN_NAME"] = "" # defaults to LLMRun
# LLM API Keys
os.environ['OPENAI_API_KEY'] = ""

# set langsmith as a callback, litellm will send the data to langsmith
litellm.callbacks = ["langsmith"]

# openai call
response = litellm.completion(
  model="gpt-3.5-turbo",
  messages=[
    {"role": "user", "content": "Hi 👋 - i'm openai"}
  ]
)
```
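
LiteLLM callbacks are shared across call types, so async calls should be captured as well; below is a minimal sketch using `litellm.acompletion` (illustrative, under that assumption):

```python
import asyncio
import os

import litellm

os.environ["LANGSMITH_API_KEY"] = ""
os.environ['OPENAI_API_KEY'] = ""

# same callback as the sync example above
litellm.callbacks = ["langsmith"]

async def main():
    # async variant of the completion call; should log to Langsmith the same way
    response = await litellm.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
    )
    print(response)

asyncio.run(main())
```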
</TabItem>
<TabItem value="proxy" label="LiteLLM Proxy">

1. Setup config.yaml
```yaml
model_list:
  - model_name: gpt-3.5-turbo
    litellm_params:
      model: openai/gpt-3.5-turbo
      api_key: os.environ/OPENAI_API_KEY

litellm_settings:
  callbacks: ["langsmith"]
```

2. Start LiteLLM Proxy
```bash
litellm --config /path/to/config.yaml
```

3. Test it!
```bash
curl -L -X POST 'http://0.0.0.0:4000/v1/chat/completions' \
-H 'Content-Type: application/json' \
-H 'Authorization: Bearer sk-eWkpOhYaHiuIZV-29JDeTQ' \
-d '{
  "model": "gpt-3.5-turbo",
  "messages": [
    {
      "role": "user",
      "content": "Hey, how are you?"
    }
  ],
  "max_completion_tokens": 250
}'
```
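
The proxy endpoint is OpenAI-compatible, so you can also test with any OpenAI client instead of curl. A minimal sketch with the `openai` Python SDK, assuming the proxy is running locally on port 4000 and reusing the proxy key from the curl example:

```python
import openai

# assumptions: proxy running at http://0.0.0.0:4000, api_key is your LiteLLM proxy key
client = openai.OpenAI(
    api_key="sk-eWkpOhYaHiuIZV-29JDeTQ",
    base_url="http://0.0.0.0:4000",
)

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hey, how are you?"}],
    max_completion_tokens=250,
)
print(response.choices[0].message.content)
```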
</TabItem>
</Tabs>

## Advanced

### Local Testing - Control Batch Size

Set the size of the batch that the Langsmith logger will process at a time; the default is 512.

Set `langsmith_batch_size=1` when testing locally, to see logs land quickly.

<Tabs>
<TabItem value="python" label="SDK">

```python
import litellm
import os

os.environ["LANGSMITH_API_KEY"] = ""
# LLM API Keys
os.environ['OPENAI_API_KEY'] = ""

# set langsmith as a callback, litellm will send the data to langsmith
litellm.callbacks = ["langsmith"]
litellm.langsmith_batch_size = 1 # 👈 KEY CHANGE

response = litellm.completion(
  model="gpt-3.5-turbo",
  messages=[
    {"role": "user", "content": "Hi 👋 - i'm openai"}
  ]
)
print(response)
```
</TabItem>
<TabItem value="proxy" label="LiteLLM Proxy">

1. Setup config.yaml
```yaml
model_list:
  - model_name: gpt-3.5-turbo
    litellm_params:
      model: openai/gpt-3.5-turbo
      api_key: os.environ/OPENAI_API_KEY

litellm_settings:
  langsmith_batch_size: 1
  callbacks: ["langsmith"]
```

2. Start LiteLLM Proxy
```bash
litellm --config /path/to/config.yaml
```

3. Test it!
```bash
curl -L -X POST 'http://0.0.0.0:4000/v1/chat/completions' \
-H 'Content-Type: application/json' \
-H 'Authorization: Bearer sk-eWkpOhYaHiuIZV-29JDeTQ' \
-d '{
  "model": "gpt-3.5-turbo",
  "messages": [
    {
      "role": "user",
      "content": "Hey, how are you?"
    }
  ],
  "max_completion_tokens": 250
}'
```

</TabItem>
</Tabs>

### Set Langsmith fields
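
A minimal sketch, assuming Langsmith fields such as `run_name` and `project_name` are passed through the `metadata` param of the completion call, the pattern LiteLLM uses for its logging integrations; the field values below are illustrative:

```python
import os

import litellm

os.environ["LANGSMITH_API_KEY"] = ""
os.environ['OPENAI_API_KEY'] = ""

litellm.callbacks = ["langsmith"]

response = litellm.completion(
  model="gpt-3.5-turbo",
  messages=[
    {"role": "user", "content": "Hi 👋 - i'm openai"}
  ],
  metadata={
    "run_name": "litellmRUN",            # assumed: Langsmith run name field
    "project_name": "litellm-completion" # assumed: Langsmith project field
  }
)
print(response)
```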