|
10 | 10 | }, |
11 | 11 | { |
12 | 12 | "cell_type": "code", |
13 | | - "execution_count": 21, |
14 | 13 | "id": "be42e7c5-b2af-476f-8041-c17be56edb52", |
15 | 14 | "metadata": {}, |
16 | | - "outputs": [ |
17 | | - { |
18 | | - "name": "stdout", |
19 | | - "output_type": "stream", |
20 | | - "text": [ |
21 | | - "> 2025-12-03 07:17:36,530 [info] Project loaded successfully: {\"project_name\":\"langchain-example-10\"}\n" |
22 | | - ] |
23 | | - } |
24 | | - ], |
25 | 15 | "source": [ |
26 | 16 | "%config Completer.use_jedi = False\n", |
27 | 17 | "\n", |
|
31 | 21 | "image = \"mlrun/mlrun\"\n", |
32 | 22 | "project_name = \"langchain-example\"\n", |
33 | 23 | "project = get_or_create_project(project_name, context=\"./\", allow_cross_project=True)" |
34 | | - ] |
| 24 | + ], |
| 25 | + "outputs": [], |
| 26 | + "execution_count": null |
35 | 27 | }, |
36 | 28 | { |
37 | 29 | "cell_type": "markdown", |
|
43 | 35 | }, |
44 | 36 | { |
45 | 37 | "cell_type": "code", |
46 | | - "execution_count": 23, |
47 | 38 | "id": "a47d7789-2ea2-493e-8905-f53b978e2abd", |
48 | 39 | "metadata": {}, |
49 | | - "outputs": [], |
50 | 40 | "source": [ |
51 | 41 | "# Create project secrets for project\n", |
52 | 42 | "secrets = {\"OPENAI_API_KEY\": \"\", # add your OpenAI API key here\n", |
53 | 43 | " \"OPENAI_BASE_URL\": \"\" # add your OpenAI base url here if needed\n", |
54 | 44 | " }\n", |
55 | 45 | "project.set_secrets(secrets=secrets, provider=\"kubernetes\")" |
56 | | - ] |
| 46 | + ], |
| 47 | + "outputs": [], |
| 48 | + "execution_count": null |
57 | 49 | }, |
58 | 50 | { |
59 | 51 | "cell_type": "markdown", |
|
65 | 57 | }, |
66 | 58 | { |
67 | 59 | "cell_type": "code", |
68 | | - "execution_count": null, |
69 | 60 | "id": "25cbd982-86de-43b5-91ef-24fc60b2d758", |
70 | 61 | "metadata": {}, |
71 | | - "outputs": [], |
72 | 62 | "source": [ |
73 | 63 | "%%writefile langchain_model.py\n", |
74 | 64 | "\n", |
|
197 | 187 | " result[\"total_cost_usd\"] = input_cost + output_cost\n", |
198 | 188 | " return result\n", |
199 | 189 | " " |
200 | | - ] |
| 190 | + ], |
| 191 | + "outputs": [], |
| 192 | + "execution_count": null |
201 | 193 | }, |
202 | 194 | { |
203 | 195 | "cell_type": "markdown", |
|
209 | 201 | }, |
210 | 202 | { |
211 | 203 | "cell_type": "code", |
212 | | - "execution_count": 81, |
213 | 204 | "id": "691e9068-ec9c-40d6-9ac8-e6c3e605b44c", |
214 | 205 | "metadata": {}, |
215 | | - "outputs": [ |
216 | | - { |
217 | | - "name": "stdout", |
218 | | - "output_type": "stream", |
219 | | - "text": [ |
220 | | - "> 2025-12-03 10:55:46,194 [info] Project loaded successfully: {\"project_name\":\"langchain-example-10\"}\n", |
221 | | - "> 2025-12-03 10:55:46,463 [info] Model monitoring credentials were set successfully. Please keep in mind that if you already had model monitoring functions / model monitoring infra / tracked model server deployed on your project, you will need to redeploy them. For redeploying the model monitoring infra, first disable it using `project.disable_model_monitoring()` and then enable it using `project.enable_model_monitoring()`.\n", |
222 | | - "details: MLRunConflictError(\"The following model-montioring infrastructure functions are already deployed, aborting: ['model-monitoring-controller', 'model-monitoring-writer']\\nIf you want to redeploy the model-monitoring controller (maybe with different base-period), use update_model_monitoring_controller.If you want to redeploy all of model-monitoring infrastructure, call disable_model_monitoringbefore calling enable_model_monitoring again.\")\n" |
223 | | - ] |
224 | | - } |
225 | | - ], |
226 | 206 | "source": [ |
227 | 207 | "module = mlrun.import_module(\"hub://agent_deployer\")\n", |
228 | 208 | "\n", |
|
237 | 217 | " prompt_template= \"\"\"\n", |
238 | 218 | " Answer the following questions as best you can.\n", |
239 | 219 | " You have access to the following tools:\n", |
240 | | - " {tools}\n", |
| 220 | + " {{tools}}\n", |
241 | 221 | " Use the following format:\n", |
242 | 222 | " Question: the input question you must answer\n", |
243 | 223 | " Thought: you should always think about what to do\n", |
244 | | - " Action: the action to take, should be one of [{tool_names}]\n", |
| 224 | + " Action: the action to take, should be one of [{{tool_names}}]\n", |
245 | 225 | " Action Input: the input to the action\n", |
246 | 226 | " Observation: the result of the action\n", |
247 | 227 | " ... (this Thought/Action/Action Input/Observation can repeat N times)\n", |
|
252 | 232 | " Question: {input}\n", |
253 | 233 | " Thought:{agent_scratchpad}\n", |
254 | 234 | " \"\"\",\n", |
255 | | - ")" |
256 | | - ] |
| 235 | + ")\n" |
| 236 | + ], |
| 237 | + "outputs": [], |
| 238 | + "execution_count": null |
257 | 239 | }, |
258 | 240 | { |
259 | 241 | "cell_type": "code", |
260 | | - "execution_count": 82, |
261 | 242 | "id": "0bb1c4d1-5d7c-4d1c-bf51-8f53b319e91f", |
262 | 243 | "metadata": {}, |
| 244 | + "source": "func = agent.deploy_function(enable_tracking=True)", |
263 | 245 | "outputs": [], |
264 | | - "source": "func = agent.deploy_function(enable_tracking=True)" |
| 246 | + "execution_count": null |
265 | 247 | }, |
266 | 248 | { |
267 | 249 | "metadata": {}, |
|
272 | 254 | { |
273 | 255 | "metadata": {}, |
274 | 256 | "cell_type": "code", |
| 257 | + "source": "func.invoke(\"./\", {\"question\" : \"If a pizza costs $18.75 and I want to buy 3, what is the total cost?\"})", |
| 258 | + "id": "ac5c3ba174d2cf8b", |
275 | 259 | "outputs": [], |
276 | | - "execution_count": null, |
277 | | - "source": "func.invoke(\"./\", {\"question\" : \"If a pizza costs $18.75 and I want to buy 3, plus a 15% tip, what is the total cost?\"})", |
278 | | - "id": "ac5c3ba174d2cf8b" |
| 260 | + "execution_count": null |
279 | 261 | }, |
280 | 262 | { |
281 | 263 | "metadata": {}, |
|
289 | 271 | { |
290 | 272 | "metadata": {}, |
291 | 273 | "cell_type": "code", |
292 | | - "outputs": [], |
293 | | - "execution_count": null, |
294 | 274 | "source": [ |
295 | 275 | "%%writefile monitoring_application.py\n", |
296 | 276 | "\n", |
|
405 | 385 | " value=value,\n", |
406 | 386 | " )\n" |
407 | 387 | ], |
408 | | - "id": "377487422f5ed289" |
| 388 | + "id": "377487422f5ed289", |
| 389 | + "outputs": [], |
| 390 | + "execution_count": null |
409 | 391 | }, |
410 | 392 | { |
411 | 393 | "metadata": {}, |
|
416 | 398 | { |
417 | 399 | "metadata": {}, |
418 | 400 | "cell_type": "code", |
419 | | - "outputs": [], |
420 | | - "execution_count": null, |
421 | 401 | "source": [ |
422 | 402 | "llm_monitoring_app = project.set_model_monitoring_function(\n", |
423 | 403 | " func=\"monitoring_application.py\",\n", |
|
428 | 408 | "\n", |
429 | 409 | "project.deploy_function(llm_monitoring_app)" |
430 | 410 | ], |
431 | | - "id": "9d6ad2a4a47a44bd" |
| 411 | + "id": "9d6ad2a4a47a44bd", |
| 412 | + "outputs": [], |
| 413 | + "execution_count": null |
432 | 414 | } |
433 | 415 | ], |
434 | 416 | "metadata": { |
|