@@ -44,7 +44,7 @@ def extract_data(data):
     # get result and rotate 90 deg
     pred_disp = cv2.transpose(np.asarray(data['disp'], dtype='uint8'))
-    if input not in data:
+    if 'input' not in data:
         return pred_disp, duration

     in_img = np.asarray(data['input'], dtype='uint8').transpose((2, 0, 1))
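The one-line change above fixes a membership test that compared against the builtin input function instead of the string key 'input'. A function object is never a key of the response dict, so the old condition was always true and the early return fired on every call, which meant the input image and reference pattern were never extracted. A tiny illustration (the dict literal is made up, only the keys matter):

data = {'disp': [[0, 1]], 'input': [[[0]]], 'duration': 0.01}
print(input not in data)    # True  -- the builtin function is never a key
print('input' not in data)  # False -- the string key is present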
@@ -72,6 +72,12 @@ def put_image(img_path):
     return data


+def change_minimal_data(enabled):
+    r = requests.post(f'{API_URL}/params/minimal_data/{not enabled}')
+    cv2.destroyWindow('Input Image')
+    cv2.destroyWindow('Reference Image')
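The new change_minimal_data helper flips the server-side minimal_data parameter through the REST API and closes the two image windows that stop receiving updates once the server omits the images. Later in this diff it is called with 'input' not in data, i.e. with the current minimal-data state, so the POST requests the opposite value and effectively toggles the flag. The response layout is not visible in this diff; a sketch of what the client code appears to expect, where every key name other than 'disp', 'input' and 'duration' is an assumption:

full_response = {
    'disp': ...,       # predicted disparity map, converted to uint8 by extract_data
    'input': ...,      # captured input image, only present when minimal_data is off
    'ref_pat': ...,    # reference pattern (assumed key name)
    'duration': 0.05,  # server-side inference time in seconds
}
minimal_response = {'disp': ..., 'duration': 0.05}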
if __name__ == '__main__':
    while True:
        for img in os.scandir(img_dir):
@@ -83,14 +89,18 @@ if __name__ == '__main__':
             downsize_input_img()

             data = put_image('buffer.png')
             if 'input' in data:
                 pred_disp, in_img, ref_pat, duration = extract_data(data)
             else:
                 pred_disp, duration = extract_data(data)

             print(f'inference took {duration:1.4f}s')
             print(f'pipeline and transfer took another {(datetime.now() - start).total_seconds() - float(duration):1.4f}s')
             print(f"Pred. Disparity: \n\t{pred_disp.min():.{2}f}/{pred_disp.max():.{2}f}\n")

             if 'input' in data:
                 cv2.imshow('Input Image', in_img)
-                # cv2.imshow('Reference Image', ref_pat)
+                cv2.imshow('Reference Image', ref_pat)
             cv2.imshow('Normalized Predicted Disparity', normalize_and_colormap(pred_disp))
             cv2.imshow('Predicted Disparity', pred_disp)

             key = cv2.waitKey()
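normalize_and_colormap is not part of this diff; judging by the two imshow calls, the raw uint8 disparity is shown as-is (which looks nearly black for small disparity values) next to a contrast-stretched, colormapped version. A minimal sketch of such a helper, assuming min-max normalization and an OpenCV colormap; the actual implementation in the repository may differ:

import cv2
import numpy as np

def normalize_and_colormap(disp):
    # stretch to the full 8-bit range, then map to a false-color image
    norm = cv2.normalize(disp, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    return cv2.applyColorMap(norm, cv2.COLORMAP_JET)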
@@ -99,3 +109,5 @@ if __name__ == '__main__':
                 quit()
             elif key == 101:
                 change_epoch()
+            elif key == 109:
+                change_minimal_data('input' not in data)
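The key codes in the waitKey handler are plain ASCII values: 101 is 'e' (switch epoch) and the newly added 109 is 'm' (toggle minimal-data mode); the branch guarding quit() sits just above this hunk, so its key code is not visible here. Using ord() would make the mapping explicit; a sketch of the same dispatch, reusing the script's own helpers:

key = cv2.waitKey()
if key == ord('e'):      # 101 -- switch the model epoch on the server
    change_epoch()
elif key == ord('m'):    # 109 -- toggle minimal-data mode
    change_minimal_data('input' not in data)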