Commit ff5f80a

chore: add [CCCV] prefix to error and log messages (#3)
* chore: only test PyTorch >= 2.0
* chore: add CCCV prefix to error and log messages
1 parent ef717dc · commit ff5f80a

11 files changed: +36 −36 lines


.github/workflows/CI-test.yml
Lines changed: 1 addition & 6 deletions

@@ -24,7 +24,7 @@ jobs:
       matrix:
         os-version: ["windows-latest", "ubuntu-latest", "macos-14"]
         python-version: ["3.9"]
-        pytorch-version: ["2.7.1", "2.1.2", "2.0.0", "1.13.0"]
+        pytorch-version: ["2.7.1", "2.1.2", "2.0.0"]

     runs-on: ${{ matrix.os-version }}
     steps:
@@ -58,11 +58,6 @@ jobs:
         run: |
           uv pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1

-      - name: Install PyTorch 1.13.0
-        if: matrix.pytorch-version == '1.13.0'
-        run: |
-          uv pip install torch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0
-
       - name: Show environment
         run: |
           uv pip list

README.md
Lines changed: 1 addition & 1 deletion

@@ -10,7 +10,7 @@ an inference lib for image/video restoration and video frame interpolation with

 ### Install

-Make sure you have Python >= 3.9 and PyTorch >= 1.13 installed
+Make sure you have Python >= 3.9 and PyTorch >= 2.0 installed

 ```bash
 pip install cccv
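
Since the minimum supported PyTorch moves from 1.13 to 2.0 in this commit, a quick pre-flight check can catch an outdated install. This is a minimal sketch, not part of the cccv codebase; the major-version parsing is my own assumption.

```python
# Hypothetical check (not from this commit): verify the PyTorch requirement
# stated in the updated README (>= 2.0) before using cccv.
import torch

major = int(torch.__version__.split(".")[0])
if major < 2:
    raise RuntimeError(f"[CCCV] PyTorch >= 2.0 is required, found {torch.__version__}")
```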

cccv/cache_models/__init__.py
Lines changed: 15 additions & 10 deletions

@@ -53,19 +53,24 @@ def load_file_from_url(
     :return:
     """

+    CCCV_REMOTE_MODEL_ZOO = os.environ.get(
+        "CCCV_REMOTE_MODEL_ZOO", "https://github.com/EutropicAI/cccv/releases/download/model_zoo/"
+    )
+    CCCV_CACHE_MODEL_DIR = os.environ.get("CCCV_CACHE_MODEL_DIR", str(CACHE_PATH))
+
     if model_dir is None:
-        model_dir = str(CACHE_PATH)
+        model_dir = str(CCCV_CACHE_MODEL_DIR)
+        print(
+            f"[CCCV] Using default cache model path {model_dir}, override it by setting environment variable CCCV_CACHE_MODEL_DIR"
+        )

     cached_file_path = os.path.abspath(os.path.join(model_dir, config.name))

     if config.url is not None:
         _url: str = str(config.url)
     else:
-        CCCV_REMOTE_MODEL_ZOO = os.environ.get(
-            "CCCV_REMOTE_MODEL_ZOO", "https://github.com/EutropicAI/cccv/releases/download/model_zoo/"
-        )
         print(
-            f"Fetch models from {CCCV_REMOTE_MODEL_ZOO}, override it by setting environment variable CCCV_REMOTE_MODEL_ZOO"
+            f"[CCCV] Fetching models from {CCCV_REMOTE_MODEL_ZOO}, override it by setting environment variable CCCV_REMOTE_MODEL_ZOO"
         )
         if not CCCV_REMOTE_MODEL_ZOO.endswith("/"):
             CCCV_REMOTE_MODEL_ZOO += "/"
@@ -79,15 +84,15 @@ def load_file_from_url(

     if not os.path.exists(cached_file_path) or force_download:
         if _gh_proxy is not None:
-            print(f"Using github proxy: {_gh_proxy}")
-        print(f"Downloading: {_url} to {cached_file_path}\n")
+            print(f"[CCCV] Using github proxy: {_gh_proxy}")
+        print(f"[CCCV] Downloading: {_url} to {cached_file_path}\n")

         @retry(wait=wait_random(min=3, max=5), stop=stop_after_delay(10) | stop_after_attempt(30))
         def _download() -> None:
             try:
                 download_url_to_file(url=_url, dst=cached_file_path, hash_prefix=None, progress=progress)
             except Exception as e:
-                print(f"Download failed: {e}, retrying...")
+                print(f"[CCCV] Download failed: {e}, retrying...")
                 raise e

         _download()
@@ -96,7 +101,7 @@ def _download() -> None:
     get_hash = get_file_sha256(cached_file_path)
     if get_hash != config.hash:
         raise ValueError(
-            f"File {cached_file_path} hash mismatched with config hash {config.hash}, compare with {get_hash}"
+            f"[CCCV] File {cached_file_path} hash mismatched with config hash {config.hash}, compare with {get_hash}"
         )

     return cached_file_path
@@ -110,4 +115,4 @@ def _download() -> None:
             continue
         file_path = os.path.join(root, file)
         name = os.path.basename(file_path)
-        print(f"{name}: {get_file_sha256(file_path)}")
+        print(f"[CCCV] {name}: {get_file_sha256(file_path)}")
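
The environment variable names and their defaults come straight from this file; a minimal sketch of how a user might override them before any model is fetched (the paths and mirror URL below are hypothetical, and no specific cccv entry point is assumed):

```python
# Sketch: overriding the cache directory and remote model zoo handled in
# cccv/cache_models/__init__.py. Only the variable names and defaults are
# taken from the diff; the values here are placeholders.
import os

# Must be set before cccv resolves a model download.
os.environ["CCCV_CACHE_MODEL_DIR"] = "/data/cccv_models"                        # hypothetical path
os.environ["CCCV_REMOTE_MODEL_ZOO"] = "https://mirror.example.com/model_zoo/"   # hypothetical mirror

# Any later call that goes through load_file_from_url(...) should then cache
# into /data/cccv_models and fetch from the mirror, printing the "[CCCV] ..."
# messages added in this commit.
```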

cccv/model/auxiliary_base_model.py
Lines changed: 1 addition & 1 deletion

@@ -8,4 +8,4 @@
 @MODEL_REGISTRY.register(name=ModelType.AuxiliaryBaseModel)
 class AuxiliaryBaseModel(CCBaseModel):
     def inference(self, *args: Any, **kwargs: Any) -> Any:
-        raise NotImplementedError("Auxiliary model should use self.model to load in the main model")
+        raise NotImplementedError("[CCCV] Auxiliary model should use self.model to load in the main model")

cccv/model/base_model.py
Lines changed: 5 additions & 5 deletions

@@ -69,7 +69,7 @@ def __init__(
             try:
                 self.model = self.model.half()
             except Exception as e:
-                print(f"Error: {e}, fp16 is not supported on this model.")
+                print(f"[CCCV] Warning: {e}. \nfp16 is not supported on this model, fallback to fp32.")
                 self.fp16 = False
                 self.model = self.load_model()

@@ -83,7 +83,7 @@ def __init__(
                 self.compile_backend = "inductor"
                 self.model = torch.compile(self.model, backend=self.compile_backend)
             except Exception as e:
-                print(f"Error: {e}, compile is not supported on this model.")
+                print(f"[CCCV] Error: {e}, compile is not supported on this model.")

     def post_init_hook(self) -> None:
         """
@@ -109,7 +109,7 @@ def get_state_dict(self) -> Any:
                 config=cfg, force_download=False, model_dir=self.model_dir, gh_proxy=self.gh_proxy
             )
         except Exception as e:
-            print(f"Error: {e}, try force download the model...")
+            print(f"[CCCV] Error: {e}, try force download the model...")
             state_dict_path = load_file_from_url(
                 config=cfg, force_download=True, model_dir=self.model_dir, gh_proxy=self.gh_proxy
             )
@@ -150,9 +150,9 @@ def load_model(self) -> Any:
         try:
             net_kw = {k: cfg_dict[k] for k in signature(net).parameters}
         except (KeyError, TypeError) as e:
-            raise RuntimeError(f"Config missing or mismatch required param for {net.__name__}: {e}") from e
+            raise RuntimeError(f"[CCCV] Config missing or mismatch required param for {net.__name__}: {e}") from e

-        # print(f"net_kw: {net_kw}")
+        # print(f"[CCCV] net_kw: {net_kw}")
         model = net(**net_kw)

         model.load_state_dict(state_dict)
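
The reworded messages sit inside two try/except fallbacks in `CCBaseModel.__init__`: half-precision conversion and `torch.compile`. A simplified standalone sketch of that pattern, using a stand-in `nn.Linear` model rather than the real class (which reloads via `self.load_model()` instead of `.float()`):

```python
# Illustrative sketch of the fallback pattern shown in the diff above; the
# tiny model and the .float() fallback are simplifications, not cccv code.
import torch
import torch.nn as nn

model = nn.Linear(8, 8)
fp16 = True

if fp16:
    try:
        model = model.half()
    except Exception as e:  # some ops/backends cannot run in fp16
        print(f"[CCCV] Warning: {e}. \nfp16 is not supported on this model, fallback to fp32.")
        fp16 = False
        model = model.float()

try:
    model = torch.compile(model, backend="inductor")
except Exception as e:
    print(f"[CCCV] Error: {e}, compile is not supported on this model.")
```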

cccv/model/vsr_base_model.py
Lines changed: 1 addition & 1 deletion

@@ -70,7 +70,7 @@ def inference_image_list(self, img_list: List[np.ndarray], *args: Any, **kwargs:

             return [img]
         else:
-            raise ValueError(f"Unexpected output shape: {out.shape}")
+            raise ValueError(f"[CCCV] Unexpected output shape: {out.shape}")

     @torch.inference_mode()  # type: ignore
     def inference_video(self, clip: Any, *args: Any, **kwargs: Any) -> Any:

cccv/util/color.py
Lines changed: 4 additions & 4 deletions

@@ -26,10 +26,10 @@ def rgb_to_yuv(image: Tensor) -> Tensor:
     >>> output = rgb_to_yuv(input)  # 2x3x4x5
     """
     if not isinstance(image, Tensor):
-        raise TypeError(f"Input type is not a Tensor. Got {type(image)}")
+        raise TypeError(f"[CCCV] Input type is not a Tensor. Got {type(image)}")

     if len(image.shape) < 3 or image.shape[-3] != 3:
-        raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")
+        raise ValueError(f"[CCCV] Input size must have a shape of (*, 3, H, W). Got {image.shape}")

     r: Tensor = image[..., 0, :, :]
     g: Tensor = image[..., 1, :, :]
@@ -65,10 +65,10 @@ def yuv_to_rgb(image: Tensor) -> Tensor:
     >>> output = yuv_to_rgb(input)  # 2x3x4x5
     """
     if not isinstance(image, Tensor):
-        raise TypeError(f"Input type is not a Tensor. Got {type(image)}")
+        raise TypeError(f"[CCCV] Input type is not a Tensor. Got {type(image)}")

     if image.dim() < 3 or image.shape[-3] != 3:
-        raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")
+        raise ValueError(f"[CCCV] Input size must have a shape of (*, 3, H, W). Got {image.shape}")

     y: Tensor = image[..., 0, :, :]
     u: Tensor = image[..., 1, :, :]
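
A short usage sketch following the doctest-style examples visible in the docstrings above (a 2x3x4x5 batch); the import path is assumed from the file location cccv/util/color.py:

```python
# Usage sketch for the functions touched above. Assumes they are importable
# from cccv.util.color, matching the file path in this diff.
import torch
from cccv.util.color import rgb_to_yuv, yuv_to_rgb

rgb = torch.rand(2, 3, 4, 5)   # (*, 3, H, W)
yuv = rgb_to_yuv(rgb)          # same shape, YUV channels
back = yuv_to_rgb(yuv)

# Invalid inputs now raise errors prefixed with "[CCCV]", e.g.:
#   rgb_to_yuv([1, 2, 3])        -> TypeError("[CCCV] Input type is not a Tensor. ...")
#   rgb_to_yuv(torch.rand(4, 5)) -> ValueError("[CCCV] Input size must have a shape of (*, 3, H, W). ...")
```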

cccv/util/device.py
Lines changed: 1 addition & 1 deletion

@@ -10,7 +10,7 @@ def default_device() -> torch.device:
     try:
         return torch.device("mps" if torch.backends.mps.is_available() else "cpu")
     except Exception as e:
-        print(f"Err: {e}, MPS is not available, use CPU instead.")
+        print(f"[CCCV] Error: {e}, MPS is not available, use CPU instead.")
         return torch.device("cpu")
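
A usage sketch for the function in this hunk; the import path is assumed from the file location, and only the MPS/CPU branch shown above is described (the full function may handle other backends outside this hunk):

```python
# Sketch: calling default_device() from cccv/util/device.py. If querying MPS
# raises, the function now prints a "[CCCV] Error: ..." message and falls
# back to CPU.
from cccv.util.device import default_device

device = default_device()  # per this hunk: MPS if available, otherwise CPU
print(device)
```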

cccv/util/registry.py
Lines changed: 2 additions & 2 deletions

@@ -41,7 +41,7 @@ def __init__(self, name: str) -> None:

     def _do_register(self, name: str, obj: Any) -> None:
         if name in self._obj_map:
-            print("An object named '{}' was already registered in '{}' registry!".format(name, self._name))
+            print("[CCCV] An object named '{}' was already registered in '{}' registry!".format(name, self._name))
         else:
             self._obj_map[name] = obj

@@ -69,7 +69,7 @@ def deco(func_or_class: Any) -> Any:
     def get(self, name: str) -> Any:
         ret = self._obj_map.get(name)
         if ret is None:
-            raise KeyError("No object named '{}' found in '{}' registry!".format(name, self._name))
+            raise KeyError("[CCCV] No object named '{}' found in '{}' registry!".format(name, self._name))
         return ret

     def __contains__(self, name: str) -> bool:
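
Both prefixed messages surface through ordinary registry use. A sketch under two assumptions not confirmed by this diff: that the class is exported as `Registry` from cccv.util.registry, and that its `register()` decorator accepts a `name=` keyword (the pattern used with `MODEL_REGISTRY.register(name=...)` in cccv/model/auxiliary_base_model.py):

```python
# Sketch with assumptions: `Registry` class name and register(name=...)
# decorator are inferred, not shown in this diff.
from cccv.util.registry import Registry

reg = Registry("demo")

@reg.register(name="Thing")
class Thing:
    pass

@reg.register(name="Thing")  # duplicate name:
class OtherThing:            # prints "[CCCV] An object named 'Thing' was already registered in 'demo' registry!"
    pass

try:
    reg.get("Missing")
except KeyError as e:
    print(e)                 # "[CCCV] No object named 'Missing' found in 'demo' registry!"
```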

cccv/vs/sr.py
Lines changed: 1 addition & 1 deletion

@@ -30,7 +30,7 @@ def inference_sr(
     """

     if clip.format.id not in [vs.RGBH, vs.RGBS]:
-        raise vs.Error("Only vs.RGBH and vs.RGBS formats are supported")
+        raise vs.Error("[CCCV] Only vs.RGBH and vs.RGBS formats are supported")

     if device.type == torch.device("cuda").type:
         return inference_sr_cuda(inference, clip, scale, device)
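
Clips must already be vs.RGBH or vs.RGBS when they reach inference_sr, or the prefixed vs.Error above is raised. A hedged VapourSynth sketch of the conversion step; the source clip and color matrix are placeholders, and no call to inference_sr is shown because its full parameter list is not visible in this diff:

```python
# Sketch: converting a clip to an accepted format before SR, so the
# "[CCCV] Only vs.RGBH and vs.RGBS formats are supported" error is avoided.
import vapoursynth as vs

core = vs.core
clip = core.std.BlankClip(format=vs.YUV420P8, width=640, height=360)  # placeholder source

if clip.format.id not in (vs.RGBH, vs.RGBS):
    clip = core.resize.Bicubic(clip, format=vs.RGBS, matrix_in_s="709")
```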
