@@ -67,7 +67,8 @@ def __init__(
         self.openai_api_client = AsyncOpenAI(
             base_url=endpoint.url,
             http_client=DefaultAioHttpClient(
-                timeout=httpx.Timeout(timeout=request_timeout_seconds, connect=5.0),
+                timeout=httpx.Timeout(
+                    timeout=request_timeout_seconds, connect=5.0),
             ),
             api_key=endpoint.api_key,
             timeout=request_timeout_seconds,
@@ -187,7 +188,9 @@ def estimated_num_performance_samples(self) -> int:
         """
         estimation_indices = random.sample(
            range(self.total_num_samples),
-            k=min(MAX_NUM_ESTIMATION_PERFORMANCE_SAMPLES, self.total_num_samples),
+            k=min(
+                MAX_NUM_ESTIMATION_PERFORMANCE_SAMPLES,
+                self.total_num_samples),
         )
         estimation_samples = [
             self.formulate_loaded_sample(
@@ -274,7 +277,8 @@ def _unload_samples_from_ram(query_sample_indices: list[int]) -> None:
             _unload_samples_from_ram,
         )

-    async def _query_endpoint_async_batch(self, query_sample: lg.QuerySample) -> None:
+    async def _query_endpoint_async_batch(
+            self, query_sample: lg.QuerySample) -> None:
         """Query the endpoint through the async OpenAI API client."""
         try:
             sample = self.loaded_samples[query_sample.index]
@@ -351,7 +355,8 @@ async def _query_endpoint_async_batch(self, query_sample: lg.QuerySample) -> None:
             ],
         )

-    async def _query_endpoint_async_stream(self, query_sample: lg.QuerySample) -> None:
+    async def _query_endpoint_async_stream(
+            self, query_sample: lg.QuerySample) -> None:
         """Query the endpoint through the async OpenAI API client."""
         ttft_set = False
         try: