Description
Search before asking
- I have searched the Supervision issues and found no similar feature requests.
Question
I have a DataLoader object, and I am able to convert the predicted model output to a Detections object using from_transformers.
Since I want to generate the mAP and confusion matrix, I need to convert the ground-truth annotations to Detections objects as well.
How can I do that?
import torch
import supervision as sv
from tqdm import tqdm

targets = []
predictions = []

for idx, batch in enumerate(tqdm(TEST_DATALOADER)):
    pixel_values = batch["pixel_values"].to(DEVICE)
    pixel_mask = batch["pixel_mask"].to(DEVICE)
    labels = [{k: v.to(DEVICE) for k, v in t.items()} for t in batch["labels"]]

    with torch.no_grad():
        outputs = model(pixel_values=pixel_values, pixel_mask=pixel_mask)

    orig_target_sizes = torch.stack([target["orig_size"] for target in labels], dim=0)
    results = image_processor.post_process_object_detection(
        outputs, target_sizes=orig_target_sizes, threshold=0.5
    )

    print("LABELS")
    print(labels[0].keys())
    print(labels[0]["class_labels"])
    print("RESULTS")
    print(results[0].keys())
    print(results[0]["labels"])

    for result in results:
        detections = sv.Detections.from_transformers(result)  # .with_nms(threshold=IOU_TRESHOLD)
        targets.append(annotations)  # <-- 'annotations' is the missing piece: how do I build it from `labels`?
        predictions.append(detections)
    break
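
For context, this is roughly what I imagine the ground-truth conversion could look like. It is only a minimal sketch, assuming each entry of batch["labels"] follows the DETR image-processor convention (normalized (cx, cy, w, h) boxes plus an orig_size of (height, width)); labels_to_detections is a hypothetical helper of mine, not an existing supervision API. Is this the intended approach, or is there a built-in way?

import numpy as np
import supervision as sv

def labels_to_detections(label: dict) -> sv.Detections:
    # hypothetical helper: build a ground-truth sv.Detections from one DETR-style label dict
    h, w = label["orig_size"].cpu().numpy()
    boxes = label["boxes"].cpu().numpy()  # assumed normalized (cx, cy, w, h)
    cx, cy, bw, bh = boxes.T
    # convert to absolute (x_min, y_min, x_max, y_max) in the original image resolution
    xyxy = np.stack(
        [(cx - bw / 2) * w, (cy - bh / 2) * h, (cx + bw / 2) * w, (cy + bh / 2) * h],
        axis=1,
    )
    return sv.Detections(
        xyxy=xyxy,
        class_id=label["class_labels"].cpu().numpy().astype(int),
    )

# inside the loop above, the ground-truth list would then be filled per image, e.g.:
# targets.append(labels_to_detections(labels[i]))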
Additional
No response