import os
import torch
from torch import nn
from collections import OrderedDict
from util.image_pool import ImagePool
from util import util
from .base_model import BaseModel
from . import networks
import numpy as np
from skimage import io
from skimage import img_as_ubyte
import matplotlib.pyplot as plt
import math
from matplotlib import colors


class FusionModel(BaseModel):
    def name(self):
        return 'FusionModel'

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        # No extra command-line options beyond those defined by BaseModel.
        return parser

    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        self.model_names = ['G', 'GF']

        # Load/define the three networks used at test time:
        #   netG     - instance branch, colorizes per-object crops
        #   netGF    - fusion branch, merges instance and full-image features
        #   netGComp - full-image (SIGGRAPH-style) branch
        num_in = opt.input_nc + opt.output_nc + 1
        self.netG = networks.define_G(num_in, opt.output_nc, opt.ngf,
                                      'instance', opt.norm, not opt.no_dropout,
                                      opt.init_type, self.gpu_ids,
                                      use_tanh=True, classification=False)
        self.netG.eval()
        self.netG = nn.DataParallel(self.netG)

        self.netGF = networks.define_G(num_in, opt.output_nc, opt.ngf,
                                       'fusion', opt.norm, not opt.no_dropout,
                                       opt.init_type, self.gpu_ids,
                                       use_tanh=True, classification=False)
        self.netGF.eval()
        self.netGF = nn.DataParallel(self.netGF)

        self.netGComp = networks.define_G(num_in, opt.output_nc, opt.ngf,
                                          'siggraph', opt.norm, not opt.no_dropout,
                                          opt.init_type, self.gpu_ids,
                                          use_tanh=True, classification=opt.classification)
        self.netGComp.eval()
        self.netGComp = nn.DataParallel(self.netGComp)

    def set_input(self, input):
        # Per-instance inputs: cropped grayscale images plus color hints and masks.
        AtoB = self.opt.which_direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.hint_B = input['hint_B'].to(self.device)
        self.mask_B = input['mask_B'].to(self.device)
        self.mask_B_nc = self.mask_B + self.opt.mask_cent
        self.real_B_enc = util.encode_ab_ind(self.real_B[:, :, ::4, ::4], self.opt)

    def set_fusion_input(self, input, box_info):
        # Full-image inputs plus the bounding-box metadata consumed by the fusion net.
        AtoB = self.opt.which_direction == 'AtoB'
        self.full_real_A = input['A' if AtoB else 'B'].to(self.device)
        self.full_real_B = input['B' if AtoB else 'A'].to(self.device)
        self.full_hint_B = input['hint_B'].to(self.device)
        self.full_mask_B = input['mask_B'].to(self.device)
        self.full_mask_B_nc = self.full_mask_B + self.opt.mask_cent
        self.full_real_B_enc = util.encode_ab_ind(self.full_real_B[:, :, ::4, ::4], self.opt)
        self.box_info_list = box_info

    def set_forward_without_box(self, input):
        # Fallback path when no instance boxes are available: colorize the full
        # image with the SIGGRAPH-style network (netGComp) only.
        AtoB = self.opt.which_direction == 'AtoB'
        self.full_real_A = input['A' if AtoB else 'B'].to(self.device)
        self.full_real_B = input['B' if AtoB else 'A'].to(self.device)
        # self.image_paths = input['A_paths' if AtoB else 'B_paths']
        self.full_hint_B = input['hint_B'].to(self.device)
        self.full_mask_B = input['mask_B'].to(self.device)
        self.full_mask_B_nc = self.full_mask_B + self.opt.mask_cent
        self.full_real_B_enc = util.encode_ab_ind(self.full_real_B[:, :, ::4, ::4], self.opt)
        (_, self.comp_B_reg) = self.netGComp(self.full_real_A, self.full_hint_B, self.full_mask_B)
        self.fake_B_reg = self.comp_B_reg

    def forward(self):
        # Run the instance branch on the cropped inputs, then fuse its feature
        # maps with the full-image inputs to predict the final ab channels.
        (_, feature_map) = self.netG(self.real_A, self.hint_B, self.mask_B)
        self.fake_B_reg = self.netGF(self.full_real_A, self.full_hint_B, self.full_mask_B,
                                     feature_map, self.box_info_list)

    def save_current_imgs(self, path, is_cuda=True):
        # Concatenate the input L channel with the predicted ab channels,
        # convert Lab -> RGB, clamp to [0, 1], and save the first image in the batch.
        if is_cuda:
            out_img = torch.clamp(util.lab2rgb(torch.cat(
                (self.full_real_A.type(torch.cuda.FloatTensor),
                 self.fake_B_reg.type(torch.cuda.FloatTensor)), dim=1), self.opt), 0.0, 1.0)
        else:
            out_img = torch.clamp(util.lab2rgb(torch.cat(
                (self.full_real_A.type(torch.FloatTensor),
                 self.fake_B_reg.type(torch.FloatTensor)), dim=1), self.opt), 0.0, 1.0)
        out_img = np.transpose(out_img.cpu().data.numpy()[0], (1, 2, 0))
        io.imsave(path, img_as_ubyte(out_img))

    def setup_to_test(self, fusion_weight_path, map_location):
        # Load pretrained weights for the fusion (GF), instance (G), and
        # full-image (GComp) networks from the given checkpoint directory.
        GF_path = 'checkpoints/{0}/latest_net_GF.pth'.format(fusion_weight_path)
        print('load Fusion model from %s' % GF_path)
        GF_state_dict = torch.load(GF_path, map_location=map_location)

        # G_path = 'checkpoints/coco_finetuned_mask_256/latest_net_G.pth'  # fine-tuned on COCO-Stuff
        G_path = 'checkpoints/{0}/latest_net_G.pth'.format(fusion_weight_path)
        G_state_dict = torch.load(G_path, map_location=map_location)

        # GComp_path = 'checkpoints/siggraph_retrained/latest_net_G.pth'  # original net
        # GComp_path = 'checkpoints/coco_finetuned_mask_256/latest_net_GComp.pth'  # fine-tuned on COCO-Stuff
        GComp_path = 'checkpoints/{0}/latest_net_GComp.pth'.format(fusion_weight_path)
        GComp_state_dict = torch.load(GComp_path, map_location=map_location)

        self.netGF.load_state_dict(GF_state_dict, strict=False)
        self.netG.module.load_state_dict(G_state_dict, strict=False)
        self.netGComp.module.load_state_dict(GComp_state_dict, strict=False)
        self.netGF.eval()
        self.netG.eval()
        self.netGComp.eval()
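
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It shows, under
# assumptions, how a test script might drive FusionModel: the dict keys
# ('A', 'hint_B', 'mask_B') are taken from set_input / set_fusion_input above,
# while 'TestOptions', 'instance_data', 'full_image_data', and 'box_info' are
# hypothetical names, not a confirmed API of this repository.
#
#     opt = TestOptions().parse()                        # hypothetical options parser
#     model = FusionModel()
#     model.initialize(opt)
#     model.setup_to_test(opt.name, map_location='cpu')  # opt.name = checkpoint folder
#     model.set_input(instance_data)                     # cropped instances
#     model.set_fusion_input(full_image_data, box_info)  # full image + boxes
#     model.forward()
#     model.save_current_imgs('results/out.png', is_cuda=False)
# ---------------------------------------------------------------------------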