Skip to content

Commit 2729387

Browse files
committed
Rework tracer
1 parent 1a66c33 commit 2729387

File tree

2 files changed

+229
-0
lines changed

2 files changed

+229
-0
lines changed

graphsignal/client/models/rate.py

Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
# coding: utf-8
2+
3+
"""
4+
Graphsignal API
5+
6+
API for uploading and querying spans, scores, metrics, and logs.
7+
8+
The version of the OpenAPI document: 1.0.0
9+
Generated by OpenAPI Generator (https://openapi-generator.tech)
10+
11+
Do not edit the class manually.
12+
""" # noqa: E501
13+
14+
15+
from __future__ import annotations
16+
import pprint
17+
import re # noqa: F401
18+
import json
19+
20+
from pydantic import BaseModel, ConfigDict, Field, StrictFloat, StrictInt
21+
from typing import Any, ClassVar, Dict, List, Union
22+
from typing import Optional, Set
23+
from typing_extensions import Self
24+
25+
class Rate(BaseModel):
    """
    Rate

    A count measured over a time interval, used to report rate metrics
    (count / interval) to the Graphsignal API.
    """ # noqa: E501
    count: Union[StrictFloat, StrictInt] = Field(description="The count value.")
    # Fixed generated description typo: "The the interval value." -> "The interval value."
    interval: Union[StrictFloat, StrictInt] = Field(description="The interval value.")
    # Property names serialized to/from the API, in wire order.
    __properties: ClassVar[List[str]] = ["count", "interval"]

    model_config = ConfigDict(
        populate_by_name=True,
        validate_assignment=True,
        protected_namespaces=(),
    )


    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.model_dump(by_alias=True))

    def to_json(self) -> str:
        """Returns the JSON representation of the model using alias"""
        # TODO: pydantic v2: use .model_dump_json(by_alias=True, exclude_unset=True) instead
        return json.dumps(self.to_dict())

    @classmethod
    def from_json(cls, json_str: str) -> Optional[Self]:
        """Create an instance of Rate from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self) -> Dict[str, Any]:
        """Return the dictionary representation of the model using alias.

        This has the following differences from calling pydantic's
        `self.model_dump(by_alias=True)`:

        * `None` is only added to the output dict for nullable fields that
          were set at model initialization. Other fields with value `None`
          are ignored.
        """
        # No fields are excluded for this model; kept for generator symmetry.
        excluded_fields: Set[str] = set([
        ])

        _dict = self.model_dump(
            by_alias=True,
            exclude=excluded_fields,
            exclude_none=True,
        )
        return _dict

    @classmethod
    def from_dict(cls, obj: Optional[Dict[str, Any]]) -> Optional[Self]:
        """Create an instance of Rate from a dict"""
        if obj is None:
            return None

        # Non-dict input (e.g. an existing model instance) is validated as-is.
        if not isinstance(obj, dict):
            return cls.model_validate(obj)

        _obj = cls.model_validate({
            "count": obj.get("count"),
            "interval": obj.get("interval")
        })
        return _obj

test/callbacks/langchain/test_v1.py

Lines changed: 140 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,140 @@
1+
import unittest
2+
import logging
3+
import sys
4+
import os
5+
import json
6+
import time
7+
from unittest.mock import patch, Mock
8+
import pprint
9+
import openai
10+
from langchain_core.tools import tool
11+
from langchain_community.agent_toolkits.load_tools import load_tools
12+
from langchain.agents import AgentExecutor, create_react_agent
13+
from typing import Any, List, Mapping, Optional
14+
from langchain.llms.base import BaseLLM
15+
from langchain_community.llms.fake import FakeListLLM
16+
from langchain.schema import LLMResult, Generation
17+
from langchain.chains import LLMChain
18+
from langchain.prompts import PromptTemplate
19+
from langchain import hub
20+
from langchain_core.messages import HumanMessage
21+
22+
import graphsignal
23+
from graphsignal.uploader import Uploader
24+
from graphsignal.recorders.openai_recorder import OpenAIRecorder
25+
from test.model_utils import find_tag, find_usage, find_payload
26+
27+
# Module-level logger for the 'graphsignal' package under test; asyncSetUp
# attaches a stdout handler so debug output is visible during test runs.
logger = logging.getLogger('graphsignal')
29+
@tool
def multiply(first_int: int, second_int: int) -> int:
    """Multiply two integers together."""
    # Simple deterministic tool used by the agent tests below.
    # NOTE: the docstring doubles as the LangChain tool description, so it is
    # kept unchanged.
    product = first_int * second_int
    return product
33+
34+
class GraphsignalCallbackHandlerTest(unittest.IsolatedAsyncioTestCase):
    """Integration tests for the Graphsignal LangChain (v1) callback handler.

    Each test patches Uploader.upload_span so uploaded spans can be inspected
    without network I/O.
    """

    async def asyncSetUp(self):
        # Make graphsignal debug logging visible when running the tests.
        if len(logger.handlers) == 0:
            logger.addHandler(logging.StreamHandler(sys.stdout))
        graphsignal.configure(
            api_key='k1',
            deployment='d1',
            upload_on_shutdown=False,
            debug_mode=True)

    async def asyncTearDown(self):
        graphsignal.shutdown()

    def _find_span(self, mocked_upload_span, op_name):
        """Return the first uploaded span whose 'operation' tag equals op_name.

        Deduplicates the identical nested find_span helper that was previously
        copy-pasted into three tests. Returns None when no span matches.
        """
        for call in mocked_upload_span.call_args_list:
            if find_tag(call[0][0], 'operation') == op_name:
                return call[0][0]
        return None

    @patch.object(Uploader, 'upload_span')
    async def test_callback_tags(self, mocked_upload_span):
        """Tags passed to the handler constructor are attached to spans."""
        from graphsignal.callbacks.langchain import GraphsignalCallbackHandler
        llm = FakeListLLM(
            responses=['Final Answer:42'],
            callbacks=[GraphsignalCallbackHandler(tags=dict(k1='v1'))]
        )

        llm.invoke([HumanMessage(content="Tell me a joke")])

        t1 = mocked_upload_span.call_args_list[0][0][0]

        self.assertEqual(find_tag(t1, 'k1'), 'v1')


    @patch.object(Uploader, 'upload_span')
    @patch('graphsignal.callbacks.langchain.v1.uuid_sha1', return_value='s1')
    async def test_chain(self, mocked_uuid_sha1, mocked_upload_span):
        """A sync agent run produces an LLM span carrying context tags."""
        # Disable LangSmith tracing so it does not interfere with the handler.
        os.environ["LANGCHAIN_TRACING_V2"] = "false"

        graphsignal.set_context_tag('ct1', 'v1')

        llm = FakeListLLM(responses=['Final Answer:42'])

        tools = [multiply]
        prompt = hub.pull("hwchase17/react")
        agent = create_react_agent(llm, tools, prompt)
        agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
        agent_executor.invoke({"input": "What is 2 times 4?"})

        llm_span = self._find_span(
            mocked_upload_span, 'langchain_community.llms.fake.FakeListLLM')
        # NOTE(review): 'chat' for a completion-style FakeListLLM looks odd —
        # confirm the handler intentionally tags all LLM runs as 'chat'.
        self.assertEqual(find_tag(llm_span, 'model_type'), 'chat')
        self.assertEqual(find_tag(llm_span, 'ct1'), 'v1')

    @patch.object(Uploader, 'upload_span')
    @patch('graphsignal.callbacks.langchain.v1.uuid_sha1', return_value='s1')
    async def test_chain_async(self, mocked_uuid_sha1, mocked_upload_span):
        """An async agent run produces the same LLM span as the sync path."""
        os.environ["LANGCHAIN_TRACING_V2"] = "false"

        graphsignal.set_context_tag('ct1', 'v1')

        llm = FakeListLLM(responses=['Final Answer:42'])
        tools = [multiply]
        prompt = hub.pull("hwchase17/react")
        agent = create_react_agent(llm, tools, prompt)
        agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
        await agent_executor.ainvoke({"input": "What is 2 times 4?"})

        llm_span = self._find_span(
            mocked_upload_span, 'langchain_community.llms.fake.FakeListLLM')
        self.assertEqual(find_tag(llm_span, 'model_type'), 'chat')

    @patch.object(Uploader, 'upload_span')
    @patch('graphsignal.callbacks.langchain.v1.uuid_sha1', return_value='s1')
    async def test_chain_async_with_decorator(self, mocked_uuid_sha1, mocked_upload_span):
        """A chain run inside @trace_function nests under the decorated span."""
        prompt = PromptTemplate(
            input_variables=["product"],
            template="What is a good name for a company that makes {product}?",
        )

        llm = FakeListLLM(responses=['Final Answer:42'])
        chain = LLMChain(llm=llm, prompt=prompt)

        @graphsignal.trace_function
        async def run_chain():
            graphsignal.set_context_tag('session_id', 's2')
            await chain.ainvoke("colorful socks")

        await run_chain()

        run_chain_span = self._find_span(mocked_upload_span, 'run_chain')
        self.assertIsNotNone(run_chain_span)

        llm_span = self._find_span(
            mocked_upload_span, 'langchain_community.llms.fake.FakeListLLM')
        self.assertEqual(find_tag(llm_span, 'library'), 'langchain')
        self.assertEqual(find_tag(llm_span, 'session_id'), 's2')

0 commit comments

Comments
 (0)