
Commit b8470c8

Merge pull request #726 from microsoft/joshuakr/7653-Comparison-Fix
Joshuakr/7653 comparison fix
2 parents: 93615a1 + 4578b80

3 files changed: +7 −11 lines

Diff for: Makefile

+1 −1

@@ -87,4 +87,4 @@ run-data-migration: ## Run the data migration moving data from one resource grou
 	python ./scripts/extract-content.py
 
 manual-inf-destroy: ## A command triggered by a user to destroy a resource group, associated resources, and related Entra items
-	@./scripts/inf-manual-destroy.sh
+	@./scripts/inf-manual-destroy.sh

Diff for: app/backend/approaches/comparewebwithwork.py

+3 −5

@@ -136,7 +136,7 @@ async def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]
         eventJson = json.loads(event)
         if "work_citation_lookup" in eventJson:
             work_citations = eventJson["work_citation_lookup"]
-        elif "content" in eventJson:
+        elif "content" in eventJson and eventJson["content"] != None:
             content += eventJson["content"]
 
     thought_chain["work_response"] = content
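The added `and eventJson["content"] != None` guard above matters because the upstream streaming step can emit an event whose "content" key is present but null; the old `elif` still fired in that case and `content += None` raised a TypeError. A minimal sketch of the failure mode and the fix, using hypothetical event payloads rather than the repository's real stream:

import json

content = ""
events = ['{"content": "partial answer "}', '{"content": null}']  # hypothetical payloads

for event in events:
    eventJson = json.loads(event)
    # The added guard: skip events whose "content" is null instead of concatenating None.
    if "content" in eventJson and eventJson["content"] != None:
        content += eventJson["content"]
    # Without the extra check, the second event would raise:
    # TypeError: can only concatenate str (not "NoneType") to str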
@@ -167,12 +167,10 @@ async def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]
     msg_to_display = '\n\n'.join([str(message) for message in messages])
     try:
         # Step 3: Final comparative analysis using OpenAI Chat Completion
-        chat_completion = await openai.ChatCompletion.acreate(
-            deployment_id=self.chatgpt_deployment,
-            model=self.model_name,
+        chat_completion = await self.client.chat.completions.create(
+            model=self.chatgpt_deployment,
             messages=messages,
             temperature=float(overrides.get("response_temp")) or 0.6,
-            max_tokens=1024,
             n=1,
             stream=True)
 
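The second hunk swaps the legacy openai<1.0 call, `openai.ChatCompletion.acreate(deployment_id=..., model=...)`, for the openai>=1.x async client API, where the Azure deployment name is passed as `model` on a client instance (`self.client`) that the approach is assumed to construct elsewhere. A minimal sketch of that client setup and call, using AsyncAzureOpenAI with placeholder endpoint, key, and API-version values rather than the repository's actual configuration:

from openai import AsyncAzureOpenAI

# Hypothetical configuration; the real values come from the app's settings, not this diff.
client = AsyncAzureOpenAI(
    azure_endpoint="https://<your-resource>.openai.azure.com",
    api_key="<your-api-key>",
    api_version="2024-02-01",
)

async def compare(messages: list[dict[str, str]], chatgpt_deployment: str):
    # In openai>=1.x the Azure deployment name goes in `model`; there is no
    # separate deployment_id argument and no ChatCompletion.acreate coroutine.
    return await client.chat.completions.create(
        model=chatgpt_deployment,
        messages=messages,
        temperature=0.6,
        n=1,
        stream=True,
    )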

Diff for: app/backend/approaches/compareworkwithweb.py

+3 −5

@@ -89,7 +89,7 @@ async def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]
         eventJson = json.loads(event)
         if "web_citation_lookup" in eventJson:
             self.web_citations = eventJson["web_citation_lookup"]
-        elif "content" in eventJson:
+        elif "content" in eventJson and eventJson["content"] != None:
             content += eventJson["content"]
 
     thought_chain["web_response"] = content
@@ -120,12 +120,10 @@ async def run(self, history: Sequence[dict[str, str]], overrides: dict[str, Any]
     msg_to_display = '\n\n'.join([str(message) for message in messages])
     try:
         # Step 3: Final comparative analysis using OpenAI Chat Completion
-        chat_completion = await openai.ChatCompletion.acreate(
-            deployment_id=self.chatgpt_deployment,
-            model=self.model_name,
+        chat_completion = await self.client.chat.completions.create(
+            model=self.chatgpt_deployment,
             messages=messages,
             temperature=float(overrides.get("response_temp")) or 0.6,
-            max_tokens=1024,
             n=1,
             stream=True)
 
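Both files drop `max_tokens=1024` and keep `stream=True`, so the caller iterates over ChatCompletionChunk objects whose `delta.content` is None on the role-only first chunk and the finish chunk, which is presumably where the null "content" guarded against above originates. A minimal consumption sketch under that assumption, not the repository's actual streaming loop:

async def collect_stream_text(chat_completion) -> str:
    # chat_completion is the awaited result of client.chat.completions.create(..., stream=True)
    content = ""
    async for chunk in chat_completion:
        if not chunk.choices:              # Azure can emit choice-less content-filter chunks
            continue
        delta = chunk.choices[0].delta
        if delta.content is not None:      # first and last chunks carry no text
            content += delta.content
    return content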
