Mentions légales du service

Skip to content
Snippets Groups Projects
Commit e25dec37 authored by KOPANAS Georgios's avatar KOPANAS Georgios
Browse files

test_path camera render

parent a9699a3c
Branches
No related tags found
No related merge requests found
...@@ -59,7 +59,7 @@ class Camera(nn.Module): ...@@ -59,7 +59,7 @@ class Camera(nn.Module):
self.FoVx = FoVx self.FoVx = FoVx
self.FoVy = FoVy self.FoVy = FoVy
self.original_image = image.clamp(0.0, 1.0).cuda() self.original_image = image.clamp(0.0, 1.0)
if loaded_optimizedimages is not None: if loaded_optimizedimages is not None:
self.image = nn.Parameter(loaded_optimizedimages.requires_grad_(True)) self.image = nn.Parameter(loaded_optimizedimages.requires_grad_(True))
else: else:
...@@ -88,22 +88,25 @@ class Camera(nn.Module): ...@@ -88,22 +88,25 @@ class Camera(nn.Module):
self.point_grid = getNormalisedImageGrid(self.image_height, self.image_width).cuda() self.point_grid = getNormalisedImageGrid(self.image_height, self.image_width).cuda()
self.depth_map = loaded_depthmap.cuda() if loaded_depthmap is not None:
if loaded_depthdelta is not None: self.depth_map = loaded_depthmap.cuda()
self.depth_delta = nn.Parameter(loaded_depthdelta.requires_grad_(True)) if loaded_depthdelta is not None:
else: self.depth_delta = nn.Parameter(loaded_depthdelta.requires_grad_(True))
self.depth_delta = nn.Parameter(torch.zeros_like(self.depth_map, device="cuda").requires_grad_(True)) else:
self.depth_delta = nn.Parameter(torch.zeros_like(self.depth_map, device="cuda").requires_grad_(True))
if loaded_normaldelta is not None: if loaded_normalmap is not None:
self.normal_map = nn.Parameter(loaded_normaldelta.cuda().requires_grad_(True)) if loaded_normaldelta is not None:
else: self.normal_map = nn.Parameter(loaded_normaldelta.cuda().requires_grad_(True))
self.normal_map = nn.Parameter(loaded_normalmap.cuda().requires_grad_(True)) else:
self.normal_map = nn.Parameter(loaded_normalmap.cuda().requires_grad_(True))
self.zfar = self.getDepth().max()*2.0
try: try:
self.zfar = self.getDepth().max()*2.0
self.znear = self.getDepth()[self.getDepth() > 0].min()/2.0 self.znear = self.getDepth()[self.getDepth() > 0].min()/2.0
except: except:
self.znear = 0.0000001 self.znear = 0.0000001
self.zfar = 100.0
self.world_view_transform = getWorld2View(torch.tensor(R), torch.tensor(T)).cuda() self.world_view_transform = getWorld2View(torch.tensor(R), torch.tensor(T)).cuda()
self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(1,2).cuda() self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(1,2).cuda()
...@@ -336,7 +339,7 @@ class Scene(): ...@@ -336,7 +339,7 @@ class Scene():
""" """
:param path: Path to colmap scene main folder. :param path: Path to colmap scene main folder.
""" """
self.extra_features = extra_features
self.name = name self.name = name
max_radius = max_radius max_radius = max_radius
...@@ -564,16 +567,16 @@ class MultiScene: ...@@ -564,16 +567,16 @@ class MultiScene:
testcams.extend(scene.getTestCameras()) testcams.extend(scene.getTestCameras())
return testcams return testcams
def getPCloudCamsForEuclidean(self, viewpoint_cam, sample): def getPCloudCamsForEuclidean(self, viewpoint_cam, sample, N=9):
for scene in self.scenes: for scene in self.scenes:
if viewpoint_cam in scene.getAllCameras(): if viewpoint_cam in scene.getAllCameras():
euclidean_neighbors = scene.getNClosestCameras(viewpoint_cam, 8+5) euclidean_neighbors = scene.getNClosestCameras(viewpoint_cam, N+5)
if sample: if sample:
filtered_neighbors = randomizeNeighbors(euclidean_neighbors, 8) filtered_neighbors = randomizeNeighbors(euclidean_neighbors, N)
while len(filtered_neighbors) < 8: while len(filtered_neighbors) < 8:
filtered_neighbors = randomizeNeighbors(viewpoint_cam.neighbors, 8) filtered_neighbors = randomizeNeighbors(viewpoint_cam.neighbors, N)
else: else:
filtered_neighbors = euclidean_neighbors[:8] filtered_neighbors = euclidean_neighbors[:N]
pcloud_cams = [scene.getAllCameras()[cam_idx] for cam_idx in filtered_neighbors] pcloud_cams = [scene.getAllCameras()[cam_idx] for cam_idx in filtered_neighbors]
return pcloud_cams return pcloud_cams
assert 0, "Didnt find camera in scenes" assert 0, "Didnt find camera in scenes"
......
...@@ -61,6 +61,66 @@ def readFvsSceneInfo(name, path): ...@@ -61,6 +61,66 @@ def readFvsSceneInfo(name, path):
cameras=cameras) cameras=cameras)
return scene_info return scene_info
def readColmapSceneInfo_Test(name, path):
    """Read a text-format COLMAP reconstruction laid out for test-path rendering.

    Parses ``stereo/sparse/{images,cameras}.txt`` under ``path`` and builds a
    ``SceneInfo`` with one ``CameraInfo`` per registered image.

    :param name: Human-readable scene name, used only for the progress line.
    :param path: Scene root; expects ``stereo/sparse/``, ``stereo/images/``
        and ``stereo/meshed-delaunay.ply`` below it.
    :return: ``SceneInfo`` carrying ply_path, num_cameras, cameras, blacklist.
    """
    ply_path = os.path.join(path, "stereo/meshed-delaunay.ply")

    # Optional per-scene blacklist: one image filename per line, stored
    # without its extension so it can be compared against image_name.
    blacklist = []
    blacklist_file = os.path.join(path, "database.blacklist")
    try:
        with open(blacklist_file, "r") as f:
            # Filter blank lines so a trailing newline does not add "".
            blacklist = [line.split(".")[0] for line in f.read().split("\n") if line]
    except OSError:
        # A missing/unreadable blacklist is expected for most scenes.
        print("No blacklist file!")

    cameras_extrinsic_file = os.path.join(path, "stereo/sparse/images.txt")
    cameras_intrinsic_file = os.path.join(path, "stereo/sparse/cameras.txt")
    cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
    cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
    num_cameras = len(cam_extrinsics)

    cameras = []
    for idx, key in enumerate(cam_extrinsics):
        # In-place progress line (1-based so the last camera reads N/N).
        sys.stdout.write('\r')
        sys.stdout.write("Reading \"{}\" | Camera {}/{}".format(name, idx + 1, num_cameras))
        sys.stdout.flush()

        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width
        uid = intr.id

        # COLMAP stores a world-to-camera rotation; transpose it to match the
        # convention the Camera class expects downstream.
        R = np.transpose(qvec2rotmat(extr.qvec))
        T = np.array(extr.tvec)

        # NOTE(review): assumes a PINHOLE-style model where params[0]/params[1]
        # are fx/fy; SIMPLE_PINHOLE stores (f, cx, cy) instead — confirm the
        # reconstruction's camera model.
        focal_length_x = intr.params[0]
        focal_length_y = intr.params[1]
        FovY = 2.0 * math.atan(0.5 * height / focal_length_y)
        FovX = 2.0 * math.atan(0.5 * width / focal_length_x)

        image_path = os.path.join(path, "stereo/images", extr.name)
        image_name = os.path.basename(image_path).split(".")[0]

        # Load as a float CHW tensor in [0, 1] with a leading batch dimension.
        # NOTE(review): assumes a 3-channel (H, W, C) image; a grayscale file
        # would make the permute below fail — confirm inputs are RGB.
        image = Image.open(image_path)
        image = torch.from_numpy(np.array(image)) / 255.0
        image = image.permute(2, 0, 1).unsqueeze(dim=0)

        cameras.append(CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX,
                                  image=image, image_path=image_path,
                                  image_name=image_name))

    sys.stdout.write('\n')
    return SceneInfo(ply_path=ply_path,
                     num_cameras=num_cameras,
                     cameras=cameras,
                     blacklist=blacklist)
def readColmapSceneInfo(name, path): def readColmapSceneInfo(name, path):
ply_path = os.path.join(path, "colmap/stereo/meshed-delaunay.ply") ply_path = os.path.join(path, "colmap/stereo/meshed-delaunay.ply")
......
import torch import torch
import random import random
from scene_loaders.ibr_scene import MultiScene from scene_loaders.ibr_scene import MultiScene, Camera
import torchvision import torchvision
import math import math
import sys import sys
...@@ -14,6 +14,18 @@ import argparse ...@@ -14,6 +14,18 @@ import argparse
import os import os
from utils.image_utils import crop_image from utils.image_utils import crop_image
from os import makedirs from os import makedirs
from scene_loaders.read_scenes_types import readColmapSceneInfo_Test
from PIL import Image
import numpy as np
def PILtoTorch(pil_image, resolution=None):
    """Convert a PIL image to a float tensor in [0, 1] shaped (1, C, H, W).

    :param pil_image: Source image (anything ``np.array`` can consume).
    :param resolution: Optional (width, height) to resize to before conversion.
    :return: ``torch.Tensor`` of shape (1, C, H, W); 2-D inputs yield C == 1.
    """
    if resolution:
        pil_image = pil_image.resize(resolution)
    tensor = torch.from_numpy(np.array(pil_image)) / 255.0
    if tensor.dim() == 3:
        # Colour input: (H, W, C) -> (1, C, H, W).
        return tensor.permute(2, 0, 1).unsqueeze(dim=0)
    # Grayscale (H, W): append a channel axis, then lay out as (1, 1, H, W).
    return tensor.unsqueeze(dim=-1).permute(2, 0, 1).unsqueeze(dim=0)
def mse(img1, img2): def mse(img1, img2):
return (((img1 - img2)) ** 2).mean() return (((img1 - img2)) ** 2).mean()
...@@ -87,14 +99,14 @@ with torch.no_grad(): ...@@ -87,14 +99,14 @@ with torch.no_grad():
torch.manual_seed(0) torch.manual_seed(0)
random.seed(0) random.seed(0)
dataset_name = args.scene_name dataset_name = "hallway_lamp"
args.input_path = "/data/graphdeco/user/gkopanas/scenes/peter/" + dataset_name + "_perview.json" args.input_path = "/data/graphdeco/user/gkopanas/scenes/catacaustic_new/hallway_lamp/sibr/pbnrScene/" + dataset_name + ".json"
with open(args.input_path) as json_file: with open(args.input_path) as json_file:
input_json = json.load(json_file) input_json = json.load(json_file)
input_json["neural_weights_folder"] = "/data/graphdeco/user/gkopanas/pointbased_neural_rendering/pbnr_pytorch/tensorboard_3d/" + dataset_name + "_final/neural_renderer" input_json["neural_weights_folder"] = "/data/graphdeco/user/gkopanas/pointbased_neural_rendering/pbnr_pytorch/tensorboard_3d/" + dataset_name + "_final_32/neural_renderer"
input_json["scenes"][0]["scene_representation_folder"] = "/data/graphdeco/user/gkopanas/pointbased_neural_rendering/pbnr_pytorch/tensorboard_3d/" + dataset_name + "_final/" + dataset_name input_json["scenes"][0]["scene_representation_folder"] = "/data/graphdeco/user/gkopanas/pointbased_neural_rendering/pbnr_pytorch/tensorboard_3d/" + dataset_name + "_final_32/" + dataset_name
for load_iter in [20000, 40000, 60000, 80000, 100000]: for load_iter in [20000, 40000, 60000, 80000, 100000]:
print("Load Iter {}:".format(load_iter)) print("Load Iter {}:".format(load_iter))
...@@ -104,23 +116,48 @@ with torch.no_grad(): ...@@ -104,23 +116,48 @@ with torch.no_grad():
args.max_radius, args.extra_features, args.max_radius, args.extra_features,
int(args.test_cameras), load_iter) int(args.test_cameras), load_iter)
output = "/data/graphdeco/user/gkopanas/pointbased_neural_rendering/pbnr_pytorch/tensorboard_3d/" + dataset_name + "_final/backlisted_renders_{}_{}_".format(args.w, args.h) + str(load_iter)
test_cameras_info = readColmapSceneInfo_Test(dataset_name, r"/data/graphdeco/user/gkopanas/scenes/catacaustic_new/hallway_lamp/colmap_1000/test_path_colmap").cameras
test_cameras = []
for cam_info in test_cameras_info:
loaded_image = None
print(cam_info.image_path)
if os.path.exists(cam_info.image_path):
loaded_image = PILtoTorch(Image.open(cam_info.image_path))
test_cameras.append(Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T, FoVx=cam_info.FovX,
FoVy=cam_info.FovY, image=loaded_image,
image_name=cam_info.image_name,
max_radius=scene.scenes[0].getAllCameras()[0].max_radius,
extra_features=scene.scenes[0].getAllCameras()[0].extra_features.shape[1],
loaded_depthmap=None,
loaded_normalmap=None,
uid=0,
loaded_depthdelta=None,
loaded_normaldelta=None,
loaded_uncertainty=None,
loaded_extrafeatures=None,
loaded_expcoefs=None,
loaded_optimizedimages=None))
print(test_cameras)
output = "/data/graphdeco/user/gkopanas/pointbased_neural_rendering/pbnr_pytorch/tensorboard_3d/" + dataset_name + "_final_32/backlisted_renders_{}_{}_".format(args.w, args.h) + str(load_iter)
makedirs(output, exist_ok=True) makedirs(output, exist_ok=True)
imgs = torch.tensor([]) imgs = torch.tensor([])
gts = torch.tensor([]) gts = torch.tensor([])
for view_cam in scene.scenes[0].blacklist_cameras: for view_cam in test_cameras:
print(view_cam.image_name)
view_list = view_cam.neighbors view_list = view_cam.neighbors
view_cam.image_width = args.w view_cam.image_width = 1000
view_cam.image_height = args.h view_cam.image_height = 666
image, image_stack, _ = render_viewpoint(view_cam, scene.getPCloudCamsForScore(view_cam, sample=False, N=10), patch=None) image, image_stack, _ = render_viewpoint(view_cam, scene.scenes[0].getNClosestCameras(view_cam, 10), patch=None)
torchvision.utils.save_image(image, torchvision.utils.save_image(image,
os.path.join(output, "out_{}.png".format(view_cam.image_name))) os.path.join(output, "out_{}.png".format(view_cam.image_name)))
torchvision.utils.save_image(view_cam.image, torchvision.utils.save_image(view_cam.image,
os.path.join(output, "gt_{}.png".format(view_cam.image_name))) os.path.join(output, "gt_{}.png".format(view_cam.image_name)))
imgs = torch.cat((imgs, image.cpu()), dim=0) imgs = torch.cat((imgs, image.cpu()), dim=0)
gts = torch.cat((gts, view_cam.image), dim=0) gts = torch.cat((gts, view_cam.image.cpu()), dim=0)
num_psnr = psnr(imgs, gts) num_psnr = psnr(imgs, gts)
print(num_psnr) print(num_psnr)
......
...@@ -161,7 +161,7 @@ while True: ...@@ -161,7 +161,7 @@ while True:
viewpoint_idx = randint(0, len(viewpoint_stack)-1) viewpoint_idx = randint(0, len(viewpoint_stack)-1)
viewpoint_cam = viewpoint_stack.pop(viewpoint_idx) viewpoint_cam = viewpoint_stack.pop(viewpoint_idx)
n_num = randint(5, 12) n_num = randint(5, 8)
pcloud_cams = scene.getPCloudCamsForScore(viewpoint_cam, sample=True, N=n_num) pcloud_cams = scene.getPCloudCamsForScore(viewpoint_cam, sample=True, N=n_num)
random.shuffle(pcloud_cams) random.shuffle(pcloud_cams)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment