Skip to content

Commit 4575f58

Browse files
committed
Dataloader updates
1 parent ddafc5e commit 4575f58

File tree

6 files changed

+43
-95
lines changed

6 files changed

+43
-95
lines changed

data/direct_dataset.py

Lines changed: 11 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
#python3 train.py --dataroot ./datasets/datasets/data-six/ --name endtoend_no_shadow_ --checkpoints_dir all_checkpoints/endtoend_checkpoints/ --model pix2pix_endtoend_3 --dataset_mode endtoend_no_shadow --input_nc 1 --output_nc 1 --crop_size 256 --gpu_id 1 --netG unet_256
21
import os.path
32
from data.base_dataset import BaseDataset, get_params, get_transform
43
from data.image_folder import make_dataset
@@ -7,11 +6,6 @@
76
import json
87

98
class DirectDataset(BaseDataset):
10-
"""A dataset class for paired image dataset.
11-
12-
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
13-
During test time, you need to prepare a directory '/path/to/data/test'.
14-
"""
159

1610
def __init__(self, opt):
1711
"""Initialize this dataset class.
@@ -23,18 +17,19 @@ def __init__(self, opt):
2317
self.C_paths=[]
2418
self.G_paths=[]
2519
self.K_paths=[]
20+
self.texture_paths={}
2621

2722
self.dir_AB = opt.dataroot # get the image directory
2823
for i in os.listdir(self.dir_AB):
2924
T_path = self.dir_AB +'/'+ i+'/params/meta.ndjson';
3025
with open(T_path) as f:
3126
for line in f:
3227
j_content = json.loads(line)
33-
texture=j_content["texture"]["name"]
34-
if texture!='sha' and texture!='som':
35-
self.C_paths += make_dataset(self.dir_AB+'/'+i+'/r_contour/', opt.max_dataset_size) # get image paths
36-
self.G_paths += make_dataset(self.dir_AB+'/'+i+'/r_gnomon/', opt.max_dataset_size) # get image paths
37-
self.K_paths += make_dataset(self.dir_AB+'/'+i+'/r_sketch/', opt.max_dataset_size) # get image paths
28+
tex=j_content["texture"]["name"]
29+
self.C_paths += make_dataset(self.dir_AB+'/'+i+'/r_contour/', opt.max_dataset_size) # get image paths
30+
self.G_paths += make_dataset(self.dir_AB+'/'+i+'/r_gnomon/', opt.max_dataset_size) # get image paths
31+
self.K_paths += make_dataset(self.dir_AB+'/'+i+'/r_sketch/', opt.max_dataset_size) # get image paths
32+
self.texture_paths[self.dir_AB+'/'+i+'/r_contour'] = opt.texture+'/'+tex+'/' # get image paths
3833
self.C_paths=sorted(self.C_paths)
3934
self.G_paths=sorted(self.G_paths)
4035
self.K_paths=sorted(self.K_paths)
@@ -43,17 +38,6 @@ def __init__(self, opt):
4338
self.output_nc = self.opt.output_nc
4439

4540
def __getitem__(self, index):
46-
"""Return a data point and its metadata information.
47-
48-
Parameters:
49-
index - - a random integer for data indexing
50-
51-
Returns a dictionary that contains A, B, A_paths and B_paths
52-
A (tensor) - - an image in the input domain
53-
B (tensor) - - its corresponding image in the target domain
54-
A_paths (str) - - image paths
55-
B_paths (str) - - image paths (same as A_paths)
56-
"""
5741
# read a image given a random integer index
5842

5943
C_path = self.C_paths[index]
@@ -73,14 +57,14 @@ def __getitem__(self, index):
7357
for line in f:
7458
j_content = json.loads(line)
7559
texture=j_content["texture"]["name"]
76-
77-
C=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/high.png").convert('RGB')
60+
61+
C=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"high.png").convert('RGB')
7862

79-
D=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/mid.png").convert('RGB')
63+
D=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"mid.png").convert('RGB')
8064

81-
E=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/shade.png").convert('RGB')
65+
E=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"shade.png").convert('RGB')
8266

83-
F=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/shadow.png").convert('RGB')
67+
F=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"shadow.png").convert('RGB')
8468
# apply the same transform to both A and B
8569
transform_params = get_params(self.opt, A.size)
8670
A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))

data/sp_dataset.py

Lines changed: 15 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
#python3 train.py --dataroot ./datasets/datasets/data-six/ --name endtoend_no_shadow_ --checkpoints_dir all_checkpoints/endtoend_checkpoints/ --model pix2pix_endtoend_3 --dataset_mode endtoend_no_shadow --input_nc 1 --output_nc 1 --crop_size 256 --gpu_id 1 --netG unet_256
21
import os.path
32
from data.base_dataset import BaseDataset, get_params, get_transform
43
from data.image_folder import make_dataset
@@ -7,18 +6,8 @@
76
import json
87

98
class SpDataset(BaseDataset):
10-
"""A dataset class for paired image dataset.
11-
12-
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
13-
During test time, you need to prepare a directory '/path/to/data/test'.
14-
"""
159

1610
def __init__(self, opt):
17-
"""Initialize this dataset class.
18-
19-
Parameters:
20-
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
21-
"""
2211
BaseDataset.__init__(self, opt)
2312
self.C_paths=[]
2413
self.G_paths=[]
@@ -27,22 +16,23 @@ def __init__(self, opt):
2716
self.S_paths=[]
2817
self.K_paths=[]
2918
self.Sh_paths=[]
19+
self.texture_paths={}
3020

3121
self.dir_AB = opt.dataroot # get the image directory
3222
for i in os.listdir(self.dir_AB):
3323
T_path = self.dir_AB +'/'+ i+'/params/meta.ndjson';
3424
with open(T_path) as f:
3525
for line in f:
3626
j_content = json.loads(line)
37-
texture=j_content["texture"]["name"]
38-
if texture!='sha' and texture!='som':
39-
self.C_paths += make_dataset(self.dir_AB+'/'+i+'/r_contour/', opt.max_dataset_size) # get image paths
40-
self.G_paths += make_dataset(self.dir_AB+'/'+i+'/r_gnomon/', opt.max_dataset_size) # get image paths
41-
self.H_paths += make_dataset(self.dir_AB+'/'+i+'/r_highlights/', opt.max_dataset_size) # get image paths
42-
self.M_paths += make_dataset(self.dir_AB+'/'+i+'/r_midtones/', opt.max_dataset_size) # get image paths
43-
self.S_paths += make_dataset(self.dir_AB+'/'+i+'/r_shades/', opt.max_dataset_size) # get image paths
44-
self.Sh_paths += make_dataset(self.dir_AB+'/'+i+'/r_shadow/', opt.max_dataset_size) # get image paths
45-
self.K_paths += make_dataset(self.dir_AB+'/'+i+'/r_sketch/', opt.max_dataset_size) # get image paths
27+
tex=j_content["texture"]["name"]
28+
self.C_paths += make_dataset(self.dir_AB+'/'+i+'/r_contour/', opt.max_dataset_size) # get image paths
29+
self.G_paths += make_dataset(self.dir_AB+'/'+i+'/r_gnomon/', opt.max_dataset_size) # get image paths
30+
self.H_paths += make_dataset(self.dir_AB+'/'+i+'/r_highlights/', opt.max_dataset_size) # get image paths
31+
self.M_paths += make_dataset(self.dir_AB+'/'+i+'/r_midtones/', opt.max_dataset_size) # get image paths
32+
self.S_paths += make_dataset(self.dir_AB+'/'+i+'/r_shades/', opt.max_dataset_size) # get image paths
33+
self.Sh_paths += make_dataset(self.dir_AB+'/'+i+'/r_shadow/', opt.max_dataset_size) # get image paths
34+
self.K_paths += make_dataset(self.dir_AB+'/'+i+'/r_sketch/', opt.max_dataset_size) # get image paths
35+
self.texture_paths[self.dir_AB+'/'+i+'/r_contour'] = opt.texture+'/'+tex+'/' # get image paths
4636
self.C_paths=sorted(self.C_paths)
4737
self.G_paths=sorted(self.G_paths)
4838
self.H_paths=sorted(self.H_paths)
@@ -55,17 +45,6 @@ def __init__(self, opt):
5545
self.output_nc = self.opt.output_nc
5646

5747
def __getitem__(self, index):
58-
"""Return a data point and its metadata information.
59-
60-
Parameters:
61-
index - - a random integer for data indexing
62-
63-
Returns a dictionary that contains A, B, A_paths and B_paths
64-
A (tensor) - - an image in the input domain
65-
B (tensor) - - its corresponding image in the target domain
66-
A_paths (str) - - image paths
67-
B_paths (str) - - image paths (same as A_paths)
68-
"""
6948
# read a image given a random integer index
7049

7150
C_path = self.C_paths[index]
@@ -93,14 +72,14 @@ def __getitem__(self, index):
9372
for line in f:
9473
j_content = json.loads(line)
9574
texture=j_content["texture"]["name"]
96-
97-
C=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/high.png").convert('RGB')
75+
76+
C=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"high.png").convert('RGB')
9877

99-
D=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/mid.png").convert('RGB')
78+
D=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"mid.png").convert('RGB')
10079

101-
E=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/shade.png").convert('RGB')
80+
E=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"shade.png").convert('RGB')
10281

103-
F=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/shadow.png").convert('RGB')
82+
F=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"shadow.png").convert('RGB')
10483

10584
# apply the same transform to both A and B
10685
transform_params = get_params(self.opt, A.size)

data/sp_ws_dataset.py

Lines changed: 8 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -6,12 +6,6 @@
66
import json
77

88
class SpWsDataset(BaseDataset):
9-
"""A dataset class for paired image dataset.
10-
11-
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
12-
During test time, you need to prepare a directory '/path/to/data/test'.
13-
"""
14-
159
def __init__(self, opt):
1610
"""Initialize this dataset class.
1711
@@ -25,21 +19,23 @@ def __init__(self, opt):
2519
self.M_paths=[]
2620
self.S_paths=[]
2721
self.K_paths=[]
22+
self.texture_paths={}
2823

2924
self.dir_AB = opt.dataroot # get the image directory
3025
for i in os.listdir(self.dir_AB):
3126
T_path = self.dir_AB +'/'+ i+'/params/meta.ndjson';
3227
with open(T_path) as f:
3328
for line in f:
3429
j_content = json.loads(line)
35-
texture=j_content["texture"]["name"]
36-
if texture!='sha' and texture!='som':
30+
tex=j_content["texture"]["name"]
31+
3732
self.C_paths += make_dataset(self.dir_AB+'/'+i+'/r_contour/', opt.max_dataset_size) # get image paths
3833
self.G_paths += make_dataset(self.dir_AB+'/'+i+'/r_gnomon/', opt.max_dataset_size) # get image paths
3934
self.H_paths += make_dataset(self.dir_AB+'/'+i+'/r_highlights/', opt.max_dataset_size) # get image paths
4035
self.M_paths += make_dataset(self.dir_AB+'/'+i+'/r_midtones/', opt.max_dataset_size) # get image paths
4136
self.S_paths += make_dataset(self.dir_AB+'/'+i+'/r_shades/', opt.max_dataset_size) # get image paths
4237
self.K_paths += make_dataset(self.dir_AB+'/'+i+'/r_sketch_geom/', opt.max_dataset_size) # get image paths
38+
self.texture_paths[self.dir_AB+'/'+i+'/r_contour'] = opt.texture+'/'+tex+'/' # get image paths
4339
self.C_paths=sorted(self.C_paths)
4440
self.G_paths=sorted(self.G_paths)
4541
self.H_paths=sorted(self.H_paths)
@@ -52,17 +48,6 @@ def __init__(self, opt):
5248
self.output_nc = self.opt.output_nc
5349

5450
def __getitem__(self, index):
55-
"""Return a data point and its metadata information.
56-
57-
Parameters:
58-
index - - a random integer for data indexing
59-
60-
Returns a dictionary that contains A, B, A_paths and B_paths
61-
A (tensor) - - an image in the input domain
62-
B (tensor) - - its corresponding image in the target domain
63-
A_paths (str) - - image paths
64-
B_paths (str) - - image paths (same as A_paths)
65-
"""
6651
# read a image given a random integer index
6752

6853
C_path = self.C_paths[index]
@@ -83,19 +68,18 @@ def __getitem__(self, index):
8368

8469
except:
8570
print("EXCEPTION:"+str(AB_path))
86-
texture="zero"
8771
with open(T_path) as f:
8872
for line in f:
8973
j_content = json.loads(line)
9074
texture=j_content["texture"]["name"]
9175

92-
C=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/high.png").convert('RGB')
76+
C=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"high.png").convert('RGB')
9377

94-
D=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/mid.png").convert('RGB')
78+
D=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"mid.png").convert('RGB')
9579

96-
E=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/shade.png").convert('RGB')
80+
E=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"shade.png").convert('RGB')
9781

98-
F=Image.open("/nfs/151/gpu/raghav/data/shadegan/brushes_v2/"+texture+"/shadow.png").convert('RGB')
82+
F=Image.open(self.texture_paths[C_path.rsplit('/',1)[0]]+"shadow.png").convert('RGB')
9983

10084
transform_params = get_params(self.opt, A.size)
10185
A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))

options/base_options.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ def initialize(self, parser):
5454
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
5555
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
5656
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
57+
parser.add_argument('--texture', default='', type=str, help='path to texture images')
5758
self.initialized = True
5859
return parser
5960

test_commands

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
direct:
2-
python3 test.py --dataroot /nfs/154/dataset2/shadegan/data-test/renders/ --name direct --checkpoints_dir test_checkpoints --model direct --dataset_mode direct --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --results_dir test_results
2+
python3 test.py --dataroot /nfs/154/dataset2/shadegan/data-test/renders/ --name direct --checkpoints_dir test_checkpoints --model direct --dataset_mode direct --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --results_dir test_results --texture /nfs/151/gpu/raghav/data/shadegan/brushes_v2
33

44
sp:
5-
python3 test.py --dataroot /nfs/154/dataset2/shadegan/data-test/renders/ --name sp --checkpoints_dir test_checkpoints --model sp --dataset_mode sp --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --results_dir test_results
5+
python3 test.py --dataroot /nfs/154/dataset2/shadegan/data-test/renders/ --name sp --checkpoints_dir test_checkpoints --model sp --dataset_mode sp --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --results_dir test_results --texture /nfs/151/gpu/raghav/data/shadegan/brushes_v2
66

77
sp-ws:
8-
python3 test.py --dataroot /nfs/154/dataset2/shadegan/data-test/renders/ --name sp_ws --checkpoints_dir test_checkpoints --model sp_ws --dataset_mode sp_ws --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --results_dir test_results
8+
python3 test.py --dataroot /nfs/154/dataset2/shadegan/data-test/renders/ --name sp_ws --checkpoints_dir test_checkpoints --model sp_ws --dataset_mode sp_ws --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --results_dir test_results --texture /nfs/151/gpu/raghav/data/shadegan/brushes_v2
99

1010
se:
11-
python3 test.py --dataroot /nfs/154/dataset2/shadegan/data-test/renders/ --name se --checkpoints_dir test_checkpoints --model se --dataset_mode sp --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --results_dir test_results
11+
python3 test.py --dataroot /nfs/154/dataset2/shadegan/data-test/renders/ --name se --checkpoints_dir test_checkpoints --model se --dataset_mode sp --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --results_dir test_results --texture /nfs/151/gpu/raghav/data/shadegan/brushes_v2

train_commands

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
direct:
2-
python3 train.py --dataroot /nfs/154/dataset2/shadegan/data-single/renders/ --name direct --checkpoints_dir test_checkpoints --model direct --dataset_mode direct --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1
2+
python3 train.py --dataroot /nfs/154/dataset2/shadegan/data-single/renders/ --name direct --checkpoints_dir test_checkpoints --model direct --dataset_mode direct --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --texture /nfs/151/gpu/raghav/data/shadegan/brushes_v2
33

44
sp:
5-
python3 train.py --dataroot /nfs/154/dataset2/shadegan/data-single/renders/ --name sp --checkpoints_dir test_checkpoints --model sp --dataset_mode sp --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1
5+
python3 train.py --dataroot /nfs/154/dataset2/shadegan/data-single/renders/ --name sp --checkpoints_dir test_checkpoints --model sp --dataset_mode sp --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --texture /nfs/151/gpu/raghav/data/shadegan/brushes_v2
66

77
sp-ws:
8-
python3 train.py --dataroot /nfs/154/dataset2/shadegan/data-six/renders/ --name sp_ws --checkpoints_dir test_checkpoints --model sp_ws --dataset_mode sp_ws --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1
8+
python3 train.py --dataroot /nfs/154/dataset2/shadegan/data-six/renders/ --name sp_ws --checkpoints_dir test_checkpoints --model sp_ws --dataset_mode sp_ws --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --texture /nfs/151/gpu/raghav/data/shadegan/brushes_v2
99

1010
se:
11-
python3 train.py --dataroot /nfs/154/dataset2/shadegan/data-six/renders/ --name se --checkpoints_dir test_checkpoints --model se --dataset_mode sp --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --gpu_id 1
11+
python3 train.py --dataroot /nfs/154/dataset2/shadegan/data-six/renders/ --name se --checkpoints_dir test_checkpoints --model se --dataset_mode sp --input_nc 1 --output_nc 1 --load_size 256 --crop_size 256 --batch_size 1 --texture /nfs/151/gpu/raghav/data/shadegan/brushes_v2

0 commit comments

Comments
 (0)