Skip to content

Commit bc88342

Browse files
fix: Update anomalib version (#130)
* docs: Update anomalib and quadra version, update changelog
* refactor: Suppress heavy prints from auto_convert_mixed_precision function
1 parent 83f342c commit bc88342

File tree

5 files changed

+19
-8
lines changed

5 files changed

+19
-8
lines changed

CHANGELOG.md

+6
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,12 @@
22
# Changelog
33
All notable changes to this project will be documented in this file.
44

5+
### [2.2.5]
6+
7+
#### Updated
8+
9+
- Update anomalib to v0.7.0.dev143 to fix a bug introduced in the previous version that caused the training to fail if the dataset size was smaller than the batch size.
10+
511
### [2.2.4]
612

713
#### Updated

poetry.lock

+4-4
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pyproject.toml

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "quadra"
3-
version = "2.2.4"
3+
version = "2.2.5"
44
description = "Deep Learning experiment orchestration library"
55
authors = [
66
"Federico Belotti <[email protected]>",
@@ -72,7 +72,7 @@ h5py = "~3.8"
7272
timm = "0.9.12"
7373

7474
segmentation_models_pytorch-orobix = "0.3.3.dev1"
75-
anomalib-orobix = "0.7.0.dev142"
75+
anomalib-orobix = "0.7.0.dev143"
7676
xxhash = "~3.2"
7777
torchinfo = "~1.8"
7878
typing_extensions = { version = "4.11.0", python = "<3.10" }

quadra/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
__version__ = "2.2.4"
1+
__version__ = "2.2.5"
22

33

44
def get_version():

quadra/utils/export.py

+6-1
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
from __future__ import annotations
22

3+
import contextlib
34
import os
45
from collections.abc import Sequence
56
from typing import Any, Literal, TypeVar, cast
@@ -377,7 +378,11 @@ def _safe_export_half_precision_onnx(
377378
model_fp32 = onnx.load(export_model_path)
378379
test_data = {input_names[i]: inp[i].float().cpu().numpy() for i in range(len(inp))}
379380
log.warning("Attempting to convert model in mixed precision, this may take a while...")
380-
model_fp16 = auto_convert_mixed_precision(model_fp32, test_data, rtol=0.01, atol=0.001, keep_io_types=False)
381+
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
382+
# This function prints a lot of information that is not useful for the user
383+
model_fp16 = auto_convert_mixed_precision(
384+
model_fp32, test_data, rtol=0.01, atol=0.001, keep_io_types=False
385+
)
381386
onnx.save(model_fp16, export_model_path)
382387

383388
onnx_model = onnx.load(export_model_path)

0 commit comments

Comments (0)