|
|
@@ -118,16 +118,16 @@ def create_data(out_root, idx, n_samples, imsize, patterns, K, baseline, blend_i
     cam_x = cam_x_ + rng.uniform(-0.1,0.1)
     cam_y = cam_y_ + rng.uniform(-0.1,0.1)
     cam_z = cam_z_ + rng.uniform(-0.1,0.1)

     tcam = np.array([cam_x, cam_y, cam_z], dtype=np.float32)

     if np.linalg.norm(tcam[0:2])<1e-9:
       Rcam = np.eye(3, dtype=np.float32)
     else:
       Rcam = get_rotation_matrix(center, center-tcam)

     tproj = tcam + basevec
     Rproj = Rcam

     ret['R'].append(Rcam)
     ret['t'].append(tcam)
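The hunk above jitters the camera position around (cam_x_, cam_y_, cam_z_) and, unless the camera sits on the optical axis, orients it toward the scene center through get_rotation_matrix, which is defined elsewhere in the repository and not shown here. As a rough illustration only (the actual helper and its two-argument signature may differ), a look-at style rotation can be built with Rodrigues' formula:

# Illustrative sketch, not the repository's get_rotation_matrix: rotate the
# unit z-axis onto a normalized viewing direction using Rodrigues' formula.
import numpy as np

def lookat_rotation(direction):
    z = np.array([0.0, 0.0, 1.0], dtype=np.float32)
    d = direction / np.linalg.norm(direction)
    v = np.cross(z, d)            # rotation axis (unnormalized)
    c = float(np.dot(z, d))       # cosine of the rotation angle
    if np.linalg.norm(v) < 1e-9:  # degenerate: direction (anti)parallel to z, fall back to identity
        return np.eye(3, dtype=np.float32)
    vx = np.array([[0.0, -v[2], v[1]],
                   [v[2], 0.0, -v[0]],
                   [-v[1], v[0], 0.0]], dtype=np.float32)
    return (np.eye(3, dtype=np.float32) + vx + vx @ vx / (1.0 + c)).astype(np.float32)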
@@ -167,7 +167,7 @@ def create_data(out_root, idx, n_samples, imsize, patterns, K, baseline, blend_i
       ambient = pyrenderer.normal().copy()
       ambient = np.mean(ambient, axis=2)

       # get the noise free IR image $J$
       im = blend_im_rnd * im + (1 - blend_im_rnd) * ambient
       ret[f'ambient{s}'].append( ambient[None].astype(np.float32) )

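The line computing the noise-free IR image is a per-pixel convex combination of the rendered image and the ambient image, weighted by blend_im_rnd (blend_im is set to 0.6 further down in the script). A tiny numeric illustration with made-up pixel values:

# Made-up pixel values, for illustration of the convex blend only.
import numpy as np
blend_im_rnd = 0.6
im      = np.array([[1.0, 0.0]], dtype=np.float32)   # dummy rendered IR intensities
ambient = np.array([[0.25, 0.25]], dtype=np.float32) # dummy ambient intensities
J = blend_im_rnd * im + (1 - blend_im_rnd) * ambient
print(J)  # prints [[0.7 0.1]], i.e. 0.6*1.0 + 0.4*0.25 and 0.6*0.0 + 0.4*0.25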
@@ -207,13 +207,13 @@ if __name__=='__main__':
 if __name__=='__main__':

   np.random.seed(42)

   # output directory
   with open('../config.json') as fp:
     config = json.load(fp)
   data_root = Path(config['DATA_ROOT'])
   shapenet_root = config['SHAPENET_ROOT']

   data_type = 'syn'
   out_root = data_root / f'{data_type}'
   out_root.mkdir(parents=True, exist_ok=True)
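The block above reads only two keys, DATA_ROOT and SHAPENET_ROOT, from ../config.json. A minimal sketch of how such a file could be created, with placeholder paths that must be adapted to the local setup:

# Hypothetical example config: the key names match the lookups above, the paths are placeholders.
import json
example_config = {
    'DATA_ROOT': '/path/to/generated/data',     # where the synthetic samples are written
    'SHAPENET_ROOT': '/path/to/ShapeNetCore',   # root directory of the ShapeNet meshes
}
with open('../config.json', 'w') as fp:
    json.dump(example_config, fp, indent=2)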
@@ -228,27 +228,28 @@ if __name__=='__main__':
   except:
     pass

   # load shapenet models
   obj_classes = ['chair']
   objs = get_objs(shapenet_root, obj_classes)

   # camera parameters
-  imsize = (480, 640)
+  imsize = (488, 648)
   imsizes = [(imsize[0]//(2**s), imsize[1]//(2**s)) for s in range(4)]
-  K = np.array([[567.6, 0, 324.7], [0, 570.2, 250.1], [0 ,0, 1]], dtype=np.float32)
+  # K = np.array([[567.6, 0, 324.7], [0, 570.2, 250.1], [0 ,0, 1]], dtype=np.float32)
+  K = np.array([[1929.5936336276382, 0, 113.66561071478046], [0, 1911.2517985448746, 473.70108079885887], [0 ,0, 1]], dtype=np.float32)
   focal_lengths = [K[0,0]/(2**s) for s in range(4)]
   baseline=0.075
   blend_im = 0.6
   noise = 0

   # capture the same static scene from different view points as a track
   track_length = 4

   # load pattern image
   pattern_path = './kinect_pattern.png'
   pattern_crop = True
   patterns = get_patterns(pattern_path, imsizes, pattern_crop)

   # write settings to file
   settings = {
     'imsizes': imsizes,
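This hunk changes imsize from (480, 640) to (488, 648), so the four pyramid levels become (488, 648), (244, 324), (122, 162), (61, 81), and replaces the original K with new values, while focal_lengths only divides K[0,0] per level. A sketch of how full per-scale intrinsics could be assembled, under the assumption (not made by the script itself) that the principal point scales with the image size as well:

# Illustrative sketch only: per-scale intrinsics, assuming fx, fy, cx, cy all
# scale with the pyramid level (the script above only scales K[0,0]).
import numpy as np

K = np.array([[1929.5936336276382, 0, 113.66561071478046],
              [0, 1911.2517985448746, 473.70108079885887],
              [0, 0, 1]], dtype=np.float32)  # K as set in the patched script

Ks = []
for s in range(4):
    K_s = K.copy()
    K_s[:2, :] /= 2**s   # divide the first two rows: fx, cx and fy, cy
    Ks.append(K_s)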
@@ -261,7 +262,7 @@ if __name__=='__main__':
   print(f'write settings to {out_path}')
   with open(str(out_path), 'wb') as f:
     pickle.dump(settings, f, pickle.HIGHEST_PROTOCOL)

   # start the job
   n_samples = 2**10 + 2**13
   for idx in range(start, n_samples):