Skip to content

Commit f9073ac

Browse files
committed
API update to show timestamp and version
1 parent 142346b commit f9073ac

File tree

2 files changed

+33
-20
lines changed

2 files changed

+33
-20
lines changed

functions.py

Lines changed: 17 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -111,39 +111,47 @@ def CheckItemStatus(target_id):
111111
return [{'qid': target_id, 'status': 'Not processed yet'}]
112112

113113

114-
#1.2. calculate the reference healthy value for an item
114+
#1.2. calculate the reference score for an item
115115
#Examples = Q5820 : error/ Q5208 : good/ Q42220 : None.
116116
def comprehensive_results(target_id):
117117
response = GetItem(target_id)
118118
if isinstance(response, list) and len(response) > 0:
119119
first_item = response[0]
120120
if isinstance(first_item, dict):
121121
if 'error' in first_item:
122-
return {'Reference score': 'Not processed yet',
122+
return {'Reference_score': 'Not processed yet',
123123
'NOT ENOUGH INFO': 'Not processed yet',
124124
'SUPPORTS': 'Not processed yet',
125-
'REFUTES': 'Not processed yet'
125+
'REFUTES': 'Not processed yet',
126+
'algo_version': first_item['algo_version'],
127+
'Requested_time': first_item['start_time']
126128
}
127129
elif 'status' in first_item and first_item['status'] == 'error':
128-
return {'Reference score': 'processing error',
130+
return {'Reference_score': 'processing error',
129131
'NOT ENOUGH INFO': 'processing error',
130132
'SUPPORTS': 'processing error',
131-
'REFUTES': 'processing error'
133+
'REFUTES': 'processing error',
134+
'algo_version': first_item['algo_version'],
135+
'Requested_time': first_item['start_time']
132136
}
133137
elif response[1].get('Result') == 'No available URLs':
134-
return {'Reference score': 'No external URLs',
138+
return {'Reference_score': 'No external URLs',
135139
'NOT ENOUGH INFO': 'No external URLs',
136140
'SUPPORTS': 'No external URLs',
137-
'REFUTES': 'No external URLs'
141+
'REFUTES': 'No external URLs',
142+
'algo_version': first_item['algo_version'],
143+
'Requested_time': first_item['start_time']
138144
}
139145
else:
140146
details = pd.DataFrame(response[1:])
141147
chekck_value_counts = details['result'].value_counts()
142148
health_value = (chekck_value_counts.get('SUPPORTS', 0) - chekck_value_counts.get('REFUTES', 0)) / chekck_value_counts.sum()
143-
return {'Reference score': health_value,
149+
return {'Reference_score': health_value,
144150
'REFUTES': details[details['result']=='REFUTES'].to_dict(),
145151
'NOT ENOUGH INFO': details[details['result']=='NOT ENOUGH INFO'].to_dict(),
146-
'SUPPORTS': details[details['result']=='SUPPORTS'].to_dict()
152+
'SUPPORTS': details[details['result']=='SUPPORTS'].to_dict(),
153+
'algo_version': first_item['algo_version'],
154+
'Requested_time': first_item['start_time']
147155
}
148156

149157

reference_checking.py

Lines changed: 16 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,7 @@ def limit_sentence_length(sentence: str, max_length: int) -> str:
212212
return sentence_relevance_df
213213

214214
def textEntailment(self, evidence_df):
215-
SCORE_THRESHOLD=self.config['evidence_selection']['score_threshold']
215+
SCORE_THRESHOLD = self.config['evidence_selection']['score_threshold']
216216
textual_entailment_df = evidence_df.copy()
217217
te_module = TextualEntailmentModule()
218218

@@ -234,9 +234,9 @@ def process_row(row):
234234
# checking the empty evidence or the error in the evidence
235235
if evidence_size == 0 or any('Error: HTTP status code' in e['sentence'] for e in evidence):
236236
results[key] = {
237-
'evidence_TE_prob': [],
238-
'evidence_TE_labels': ['REFUTES'] * evidence_size,
239-
'evidence_TE_prob_weighted': [],
237+
'evidence_TE_prob': [[0, 1, 0]],
238+
'evidence_TE_labels': ['REFUTES'],
239+
'evidence_TE_prob_weighted': [[0, 1, 0]],
240240
'claim_TE_prob_weighted_sum': [0, 1, 0],
241241
'claim_TE_label_weighted_sum': 'REFUTES',
242242
'claim_TE_label_malon': 'REFUTES'
@@ -255,23 +255,25 @@ def process_row(row):
255255
if ev['score'] > SCORE_THRESHOLD
256256
]
257257

258-
claim_TE_prob_weighted_sum = np.sum(evidence_TE_prob_weighted, axis=0) if evidence_TE_prob_weighted else [0, 0, 0]
258+
if not evidence_TE_prob_weighted:
259+
evidence_TE_prob_weighted = [[0, 1, 0]]
259260

260-
claim_TE_label_weighted_sum = te_module.get_label_from_scores(claim_TE_prob_weighted_sum) if evidence_TE_prob_weighted else 'NOT ENOUGH INFO'
261+
claim_TE_prob_weighted_sum = np.sum(evidence_TE_prob_weighted, axis=0)
261262

262-
claim_TE_label_malon = te_module.get_label_malon(
263-
[probs for probs, ev in zip(evidence_TE_prob, evidence) if ev['score'] > SCORE_THRESHOLD]
264-
)
263+
claim_TE_label_weighted_sum = te_module.get_label_from_scores(claim_TE_prob_weighted_sum)
264+
265+
claim_TE_label_malon = te_module.get_label_malon(evidence_TE_prob)
265266

266267
results[key] = {
267268
'evidence_TE_prob': evidence_TE_prob,
268269
'evidence_TE_labels': evidence_TE_labels,
269270
'evidence_TE_prob_weighted': evidence_TE_prob_weighted,
270-
'claim_TE_prob_weighted_sum': claim_TE_prob_weighted_sum,
271+
'claim_TE_prob_weighted_sum': claim_TE_prob_weighted_sum.tolist(),
271272
'claim_TE_label_weighted_sum': claim_TE_label_weighted_sum,
272273
'claim_TE_label_malon': claim_TE_label_malon
273274
}
274275
return results
276+
275277
for i, row in tqdm(textual_entailment_df.iterrows(), total=textual_entailment_df.shape[0]):
276278
result_sets = process_row(row)
277279
for key in keys:
@@ -374,7 +376,10 @@ def TableMaking(self, verbalised_claims_df_final, result):
374376
aResult = pd.DataFrame(row['nlp_sentences_TOP_N'])[['sentence','score']]
375377
aResult.rename(columns={'score': 'Relevance_score'}, inplace=True)
376378
aResult = pd.concat([aResult, pd.DataFrame(row["evidence_TE_labels_all_TOP_N"], columns=['TextEntailment'])], axis=1)
377-
aResult = pd.concat([aResult, pd.DataFrame(np.max(row["evidence_TE_prob_all_TOP_N"], axis=1), columns=['Entailment_score'])], axis=1)
379+
380+
entailment_scores = [max(prob) for prob in row["evidence_TE_prob_all_TOP_N"]]
381+
382+
aResult = pd.concat([aResult, pd.DataFrame(entailment_scores, columns=['Entailment_score'])], axis=1)
378383
aResult = aResult.reindex(columns=['sentence', 'TextEntailment', 'Entailment_score','Relevance_score'])
379384
aBox = pd.DataFrame({'triple': [row["triple"]], 'property_id' : row['property_id'], 'url': row['url'],'Results': [aResult]})
380385
all_result = pd.concat([all_result,aBox], axis=0)

0 commit comments

Comments (0)