@@ -181,16 +181,27 @@ def stream_answer_objects(
     # retrieval_options = query_req.retrieval_options
     # retrieval_options.filters.document_set =

-    search_tool = SearchTool(
-        db_session=db_session,
-        user=user,
-        persona=persona,
-        retrieval_options=query_req.retrieval_options,
-        prompt_config=prompt_config,
-        llm=llm,
-        fast_llm=fast_llm,
-        pruning_config=document_pruning_config,
-        bypass_acl=bypass_acl,
+    contains_tool = True
+    if temporary_persona:
+        contains_tool = False
+        for tool in temporary_persona.tools:
+            if tool.in_code_tool_id == "SearchTool":
+                contains_tool = True
+
+    search_tool = (
+        SearchTool(
+            db_session=db_session,
+            user=user,
+            persona=persona,
+            retrieval_options=query_req.retrieval_options,
+            prompt_config=prompt_config,
+            llm=llm,
+            fast_llm=fast_llm,
+            pruning_config=document_pruning_config,
+            bypass_acl=bypass_acl,
+        )
+        if contains_tool
+        else None
     )

     answer_config = AnswerStyleConfig(
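
For reference, a minimal self-contained sketch of the gating logic introduced in the hunk above. The `_Tool`/`_Persona` stand-ins and the `should_build_search_tool` helper are illustrative only and do not exist in the codebase; the real tool and persona models live in the app's DB layer.

from dataclasses import dataclass, field
from typing import Optional


@dataclass
class _Tool:
    # Hypothetical stand-in for a persona's tool record.
    in_code_tool_id: str


@dataclass
class _Persona:
    tools: list[_Tool] = field(default_factory=list)


def should_build_search_tool(temporary_persona: Optional[_Persona]) -> bool:
    # Mirrors the diff: default to building SearchTool, but when a temporary
    # persona is supplied, only build it if that persona carries the in-code
    # SearchTool.
    if not temporary_persona:
        return True
    return any(tool.in_code_tool_id == "SearchTool" for tool in temporary_persona.tools)


# Usage sketch.
assert should_build_search_tool(None) is True
assert should_build_search_tool(_Persona(tools=[_Tool("ImageGenerationTool")])) is False
assert should_build_search_tool(_Persona(tools=[_Tool("SearchTool")])) is True
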
@@ -204,19 +215,27 @@ def stream_answer_objects(
         prompt_config=PromptConfig.from_model(prompt),
         llm=get_main_llm_from_tuple(get_llms_for_persona(persona=persona)),
         single_message_history=history_str,
-        tools=[search_tool],
-        force_use_tool=ForceUseTool(
-            tool_name=search_tool.name,
-            args={"query": rephrased_query},
+        tools=[search_tool] if search_tool else [],
+        force_use_tool=(
+            ForceUseTool(
+                tool_name=search_tool.name,
+                args={"query": rephrased_query},
+            )
+            if search_tool
+            else None
         ),
         # for now, don't use tool calling for this flow, as we haven't
         # tested quotes with tool calling too much yet
         skip_explicit_tool_calling=True,
         return_contexts=query_req.return_contexts,
     )
+    print("ANS")
+    print(answer.__dict__)
     # won't be any ImageGenerationDisplay responses since that tool is never passed in
     dropped_inds: list[int] = []
+
     for packet in cast(AnswerObjectIterator, answer.processed_streamed_output):
+        print(packet)
         # for one-shot flow, don't currently do anything with these
         if isinstance(packet, ToolResponse):
             if packet.id == SEARCH_RESPONSE_SUMMARY_ID:
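
Similarly, a hedged sketch of how the now-optional search tool is threaded into the answer call in the hunk above. The `_ForceUseTool` stand-in and the `tool_call_kwargs` helper are hypothetical; in the diff the values are passed directly as keyword arguments to `Answer`, and `ForceUseTool` is the app's own model.

from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class _ForceUseTool:
    # Stand-in for the app's ForceUseTool; fields chosen to match the diff.
    tool_name: str
    args: dict[str, Any]


def tool_call_kwargs(search_tool: Optional[Any], rephrased_query: str) -> dict[str, Any]:
    # Mirrors the diff's wiring: when no SearchTool was built, pass an empty
    # tool list and no forced tool call.
    return {
        "tools": [search_tool] if search_tool else [],
        "force_use_tool": (
            _ForceUseTool(tool_name=search_tool.name, args={"query": rephrased_query})
            if search_tool
            else None
        ),
    }


# Usage sketch with no tool available.
print(tool_call_kwargs(None, "what changed last week?"))
# -> {'tools': [], 'force_use_tool': None}
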
@@ -250,6 +269,7 @@ def stream_answer_objects(
                     applied_time_cutoff=search_response_summary.final_filters.time_cutoff,
                     recency_bias_multiplier=search_response_summary.recency_bias_multiplier,
                 )
+
                 yield initial_response
             elif packet.id == SECTION_RELEVANCE_LIST_ID:
                 chunk_indices = packet.response