Description
[rank0]: Traceback (most recent call last):
[rank0]: File "/mnt/nj-dev-data-image-text2image-sdb/project/CogKit_bkp/quickstart/scripts/t2i/../train.py", line 23, in
[rank0]: main()
[rank0]: File "/mnt/nj-dev-data-image-text2image-sdb/project/CogKit_bkp/quickstart/scripts/t2i/../train.py", line 19, in main
[rank0]: trainer.fit()
[rank0]: File "/usr/local/lib/python3.10/site-packages/cogkit/finetune/base/base_trainer.py", line 131, in fit
[rank0]: self.prepare_dataset()
[rank0]: File "/usr/local/lib/python3.10/site-packages/cogkit/finetune/diffusion/trainer.py", line 164, in prepare_dataset
[rank0]: length_list = [self.sample_to_length(sample) for sample in self.train_dataset]
[rank0]: File "/usr/local/lib/python3.10/site-packages/cogkit/finetune/diffusion/trainer.py", line 164, in
[rank0]: length_list = [self.sample_to_length(sample) for sample in self.train_dataset]
[rank0]: File "/usr/local/lib/python3.10/site-packages/cogkit/finetune/datasets/t2i_dataset.py", line 102, in getitem
[rank0]: encoded_image = get_image_embedding(encode_fn, image, cache_dir)
[rank0]: File "/usr/local/lib/python3.10/site-packages/cogkit/finetune/datasets/utils.py", line 228, in get_image_embedding
[rank0]: return encode_fn(image.convert("RGB")).to("cpu")
[rank0]: File "/usr/local/lib/python3.10/site-packages/cogkit/finetune/datasets/t2i_dataset.py", line 97, in encode_fn
[rank0]: encoded_image = self.trainer.encode_image(image_preprocessed[None, ...])[0]
[rank0]: File "/usr/local/lib/python3.10/site-packages/cogkit/finetune/diffusion/models/cogview/cogview4/lora_trainer.py", line 147, in encode_image
[rank0]: latent_dist = vae.encode(image).latent_dist
[rank0]: File "/usr/local/lib/python3.10/site-packages/diffusers/utils/accelerate_utils.py", line 46, in wrapper
[rank0]: return method(self, *args, **kwargs)
[rank0]: File "/usr/local/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_kl.py", line 278, in encode
[rank0]: h = self._encode(x)
[rank0]: File "/usr/local/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_kl.py", line 250, in _encode
[rank0]: return self._tiled_encode(x)
[rank0]: File "/usr/local/lib/python3.10/site-packages/diffusers/models/autoencoders/autoencoder_kl.py", line 369, in _tiled_encode
[rank0]: tile = self.encoder(tile)
[rank0]: File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
[rank0]: return self._call_impl(*args, **kwargs)
[rank0]: File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
[rank0]: return forward_call(*args, **kwargs)
[rank0]: File "/usr/local/lib/python3.10/site-packages/diffusers/models/autoencoders/vae.py", line 156, in forward
[rank0]: sample = self.conv_in(sample)
[rank0]: File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
[rank0]: return self._call_impl(*args, **kwargs)
[rank0]: File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
[rank0]: return forward_call(*args, **kwargs)
[rank0]: File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 554, in forward
[rank0]: return self._conv_forward(input, self.weight, self.bias)
[rank0]: File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 549, in _conv_forward
[rank0]: return F.conv2d(
[rank0]: RuntimeError: Input type (CUDABFloat16Type) and weight type (CPUBFloat16Type) should be the same
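The error indicates a device mismatch: the preprocessed image tensor has already been moved to CUDA, while the VAE's bf16 weights are still on the CPU, so F.conv2d receives tensors living on two different devices. Below is a minimal sketch of the failure mode plus one possible workaround; the names are illustrative (vae stands for the diffusers AutoencoderKL used by lora_trainer.encode_image), not CogKit's actual code.

import torch
import torch.nn as nn

# Minimal sketch of the failure mode (illustrative, not CogKit code): a conv
# layer whose bf16 weights remain on the CPU receives a CUDA bf16 input.
conv = nn.Conv2d(16, 32, kernel_size=3).to(dtype=torch.bfloat16)  # weights stay on CPU
x = torch.randn(1, 16, 64, 64, device="cuda", dtype=torch.bfloat16)
try:
    conv(x)
except RuntimeError as e:
    print(e)  # Input type (CUDABFloat16Type) and weight type (CPUBFloat16Type) ...

# Possible workaround (assumption: `vae` is the AutoencoderKL used in
# encode_image): move the batch onto whatever device/dtype the VAE weights
# use before calling encode, so input and weights always match.
def encode_on_vae_device(vae, image_batch):
    param = next(vae.parameters())
    image_batch = image_batch.to(device=param.device, dtype=param.dtype)
    return vae.encode(image_batch).latent_dist

Alternatively, moving the VAE itself onto the GPU before prepare_dataset() runs should also avoid the mismatch, at the cost of holding the VAE in GPU memory during latent caching.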
Ranks 1–7 fail with the same traceback and RuntimeError as rank0 (identical output omitted). Ranks 1, 2, 3, 4, 6, and 7 additionally log a warning of the form:
[rank2]: 2025-06-07 05:24:20 | datasets.utils | WARNING | Image object does not have filename attribute, skipping caching.
[rank0]:[W607 05:24:22.009468669 ProcessGroupNCCL.cpp:1250] Warning: WARNING: process group has NOT been destroyed before we destruct ProcessGroupNCCL. On normal program exit, the application should call destroy_process_group to ensure that any pending NCCL operations have finished in this process. In rare cases this process can exit before this point and block the progress of another member of the process group. This constraint has always been present, but this warning has only been added since PyTorch 2.4 (function operator())
W0607 05:24:24.167000 12486 site-packages/torch/distributed/elastic/multiprocessing/api.py:897] Sending process 12572 closing signal SIGTERM
W0607 05:24:24.167000 12486 site-packages/torch/distributed/elastic/multiprocessing/api.py:897] Sending process 12573 closing signal SIGTERM
W0607 05:24:24.167000 12486 site-packages/torch/distributed/elastic/multiprocessing/api.py:897] Sending process 12574 closing signal SIGTERM
W0607 05:24:24.168000 12486 site-packages/torch/distributed/elastic/multiprocessing/api.py:897] Sending process 12575 closing signal SIGTERM
W0607 05:24:24.168000 12486 site-packages/torch/distributed/elastic/multiprocessing/api.py:897] Sending process 12576 closing signal SIGTERM
W0607 05:24:24.168000 12486 site-packages/torch/distributed/elastic/multiprocessing/api.py:897] Sending process 12578 closing signal SIGTERM
W0607 05:24:24.168000 12486 site-packages/torch/distributed/elastic/multiprocessing/api.py:897] Sending process 12579 closing signal SIGTERM
E0607 05:24:25.598000 12486 site-packages/torch/distributed/elastic/multiprocessing/api.py:869] failed (exitcode: 1) local_rank: 5 (pid: 12577) of binary: /usr/local/bin/python3.10
Traceback (most recent call last):
File "/usr/local/bin/torchrun", line 8, in
sys.exit(main())
File "/usr/local/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/init.py", line 355, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/torch/distributed/run.py", line 919, in main
run(args)
File "/usr/local/lib/python3.10/site-packages/torch/distributed/run.py", line 910, in run
elastic_launch(
File "/usr/local/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 138, in call
return launch_agent(self._config, self._entrypoint, list(args))
File "/usr/local/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 269, in launch_agent
raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
../train.py FAILED
------------------------------------------------------------
Failures:
  <NO_OTHER_FAILURES>
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2025-06-07_05:24:24
  host      : tj-1014376-cogkit-250607-copy-master-0
  rank      : 5 (local_rank: 5)
  exitcode  : 1 (pid: 12577)
  error_file: <N/A>
  traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html