Mentions légales du service

Skip to content
Snippets Groups Projects
Commit 12ecc1ca authored by KOPANAS Georgios's avatar KOPANAS Georgios
Browse files

blacklist

parent 0d98f20f
Branches
No related tags found
No related merge requests found
......@@ -100,7 +100,10 @@ class Camera(nn.Module):
self.normal_map = nn.Parameter(loaded_normalmap.cuda().requires_grad_(True))
self.zfar = self.getDepth().max()*2.0
self.znear = self.getDepth()[self.getDepth() > 0].min()/2.0
try:
self.znear = self.getDepth()[self.getDepth() > 0].min()/2.0
except:
self.znear = 0.0000001
self.world_view_transform = getWorld2View(torch.tensor(R), torch.tensor(T)).cuda()
self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(1,2).cuda()
......@@ -329,7 +332,7 @@ class Camera(nn.Module):
class Scene():
def __init__(self, path, name, scene_type, scene_representation_folder,
max_radius, extra_features, test_cameras=3, load_iter=None):
max_radius, extra_features, test_cameras, load_iter):
"""
:param path: Path to colmap scene main folder.
"""
......@@ -344,15 +347,19 @@ class Scene():
load_iter = searchForMaxIteration(os.path.join(scene_representation_folder, "depth_deltas"))
self.cameras = []
self.test_cameras = test_cameras
self.map_imgfilename_to_idx = {}
scene_info = sceneLoadTypeCallbacks[scene_type](name, path)
self.cameras = []
self.blacklist_cameras = []
self.blacklist = scene_info.blacklist
for idx, cam_info in enumerate(scene_info.cameras):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Loading \"{}\" | Camera {}/{}".format(name, idx, len(scene_info.cameras)))
sys.stdout.write("Loading \"{}\" | Camera {}/{} - Blacklist: {}".format(name, idx,
len(scene_info.cameras),
len(self.blacklist)))
sys.stdout.flush()
depthmap_path = os.path.join(path, "pbnrScene/depth_maps_type_2/",
......@@ -381,26 +388,28 @@ class Scene():
loaded_extrafeatures = None
if load_iter:
loaded_depthdelta = torch.load(os.path.join(scene_representation_folder, "depth_deltas",
"iteration_" + str(load_iter),
cam_info.image_name + ".depth_delta")).cuda()
loaded_normaldelta = torch.load(os.path.join(scene_representation_folder, "normals",
"iteration_" + str(load_iter),
cam_info.image_name + ".normals")).cuda()
loaded_uncertainty = torch.load(os.path.join(scene_representation_folder, "uncertainty_map",
"iteration_" + str(load_iter),
cam_info.image_name + ".uncertainty")).cuda()
loaded_expcoefs = torch.load(os.path.join(scene_representation_folder, "exposure_coef",
"iteration_" + str(load_iter),
cam_info.image_name + ".exp_coef")).cuda()
loaded_optimizedimages = torch.load(os.path.join(scene_representation_folder, "images",
"iteration_" + str(load_iter),
cam_info.image_name + ".img")).cuda()
loaded_extrafeatures = torch.load(os.path.join(scene_representation_folder, "learned_features",
"iteration_" + str(load_iter),
cam_info.image_name + ".feat")).cuda()
self.cameras.append(Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T, FoVx=cam_info.FovX,
try:
loaded_depthdelta = torch.load(os.path.join(scene_representation_folder, "depth_deltas",
"iteration_" + str(load_iter),
cam_info.image_name + ".depth_delta")).cuda()
loaded_normaldelta = torch.load(os.path.join(scene_representation_folder, "normals",
"iteration_" + str(load_iter),
cam_info.image_name + ".normals")).cuda()
loaded_uncertainty = torch.load(os.path.join(scene_representation_folder, "uncertainty_map",
"iteration_" + str(load_iter),
cam_info.image_name + ".uncertainty")).cuda()
loaded_expcoefs = torch.load(os.path.join(scene_representation_folder, "exposure_coef",
"iteration_" + str(load_iter),
cam_info.image_name + ".exp_coef")).cuda()
loaded_optimizedimages = torch.load(os.path.join(scene_representation_folder, "images",
"iteration_" + str(load_iter),
cam_info.image_name + ".img")).cuda()
loaded_extrafeatures = torch.load(os.path.join(scene_representation_folder, "learned_features",
"iteration_" + str(load_iter),
cam_info.image_name + ".feat")).cuda()
except:
print("\nSkipping Loading View {} idx {}".format(cam_info.image_name, idx))
camera = Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T, FoVx=cam_info.FovX,
FoVy=cam_info.FovY, image=loaded_image,
image_name=cam_info.image_name,
max_radius=max_radius,
......@@ -413,7 +422,12 @@ class Scene():
loaded_uncertainty=loaded_uncertainty,
loaded_extrafeatures=loaded_extrafeatures,
loaded_expcoefs=loaded_expcoefs,
loaded_optimizedimages=loaded_optimizedimages))
loaded_optimizedimages=loaded_optimizedimages)
if (cam_info.image_name in self.blacklist):
self.blacklist_cameras.append(camera)
else:
self.cameras.append(camera)
sys.stdout.write('\n')
self.shuffleCameras()
for idx, cam in enumerate(self.cameras):
......@@ -423,7 +437,9 @@ class Scene():
if os.path.exists(neighbors_dict_path):
with open(neighbors_dict_path) as json_file:
neighbors_dict = json.load(json_file)
for idx, cam in enumerate(self.getAllCameras()):
for cam in self.cameras:
cam.neighbors = [self.map_imgfilename_to_idx[image_name] for image_name in neighbors_dict[os.path.basename(cam.image_name)]]
for cam in self.blacklist_cameras:
cam.neighbors = [self.map_imgfilename_to_idx[image_name] for image_name in neighbors_dict[os.path.basename(cam.image_name)]]
def getNClosestCameras(self, ref_camera, N):
......@@ -561,19 +577,16 @@ class MultiScene:
return pcloud_cams
assert 0, "Didnt find camera in scenes"
def getPCloudCamsForScore(self, viewpoint_cam, sample):
# Find which scene owns viewpoint_cam, then pick the point-cloud cameras
# used to score it from that camera's precomputed neighbor index list.
for scene in self.scenes:
if viewpoint_cam in scene.getAllCameras():
max_coverage_neighbors = viewpoint_cam.neighbors
if sample:
# Randomly subsample 9 (8+1) neighbors; retry until enough are drawn.
# NOTE(review): loops forever if randomizeNeighbors can never return
# 9 entries — presumably the neighbor list is always large enough; confirm.
filtered_neighbors = randomizeNeighbors(max_coverage_neighbors, 8+1)
while len(filtered_neighbors) < 8+1:
filtered_neighbors = randomizeNeighbors(max_coverage_neighbors, 8+1)
else:
# Deterministic choice: the first 9 neighbors in stored order.
filtered_neighbors = max_coverage_neighbors[:8+1]
# Map neighbor indices back to Camera objects of the owning scene.
pcloud_cams = [scene.getAllCameras()[cam_idx] for cam_idx in filtered_neighbors]
return pcloud_cams
# Reaching here means viewpoint_cam belongs to none of the scenes.
assert 0, "Didnt find camera in scenes"
def getPCloudCamsForScore(self, viewpoint_cam, sample, N=9):
    """Return the N point-cloud source cameras used to score viewpoint_cam.

    :param viewpoint_cam: camera whose precomputed ``neighbors`` index list is used.
    :param sample: if True, draw a random subset of N neighbors (retrying until
        N are obtained); if False, take the first N in stored order.
    :param N: number of neighbor cameras to return (default 9).
    :return: list of Camera objects from the first scene.
    """
    neighbor_ids = viewpoint_cam.neighbors
    if sample:
        # Keep redrawing until the sampler yields at least N neighbors.
        picked = randomizeNeighbors(neighbor_ids, N)
        while len(picked) < N:
            picked = randomizeNeighbors(neighbor_ids, N)
    else:
        picked = neighbor_ids[:N]
    # Resolve neighbor indices against the first scene's camera list.
    cams = []
    for cam_idx in picked:
        cams.append(self.scenes[0].getAllCameras()[cam_idx])
    return cams
def save(self, path, iteration):
for scene in self.scenes:
......
......@@ -21,6 +21,7 @@ class SceneInfo(NamedTuple):
ply_path: str
num_cameras: int
cameras: list
blacklist: list
def readFvsSceneInfo(name, path):
ply_path = os.path.join(path, "dense/delaunay_photometric.ply")
......@@ -64,12 +65,20 @@ def readFvsSceneInfo(name, path):
def readColmapSceneInfo(name, path):
ply_path = os.path.join(path, "colmap/stereo/meshed-delaunay.ply")
blacklist = []
try:
blacklist_file = os.path.join(path, "colmap/database.blacklist")
with open(blacklist_file, "r") as f:
blacklist = [file_name.split(".")[0] for file_name in f.read().split("\n")]
except:
print("No blacklist file!")
cameras_extrinsic_file = os.path.join(path, "colmap/stereo/sparse/images.txt")
cameras_intrinsic_file = os.path.join(path, "colmap/stereo/sparse/cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
assert (len(cam_extrinsics) == len(cam_intrinsics))
num_cameras = len(cam_extrinsics)
cameras = []
for idx, key in enumerate(cam_extrinsics):
......@@ -110,7 +119,8 @@ def readColmapSceneInfo(name, path):
sys.stdout.write('\n')
scene_info = SceneInfo(ply_path=ply_path,
num_cameras=num_cameras,
cameras=cameras)
cameras=cameras,
blacklist=blacklist)
return scene_info
def readNVMFile(path):
......
......@@ -8,6 +8,48 @@ from datetime import datetime
from diff_rasterization.soft_depth_test import _SoftDepthTest
import time
import cv2
import json
from utils.system_utils import searchForMaxIteration
import argparse
import os
from utils.image_utils import crop_image
def mse(img1, img2):
    """Per-image mean squared error.

    Flattens everything except the leading (batch) dimension and averages,
    returning a tensor of shape (batch, 1).
    """
    diff = img1 - img2
    squared = diff.pow(2)
    return squared.view(img1.shape[0], -1).mean(1, keepdim=True)
def psnr(img1, img2):
    """Peak signal-to-noise ratio per image, assuming pixel values in [0, 1].

    Returns a tensor of shape (batch, 1); identical images yield +inf.
    """
    err = (img1 - img2).pow(2).view(img1.shape[0], -1).mean(1, keepdim=True)
    return 20 * torch.log10(1.0 / torch.sqrt(err))
def render_viewpoint(viewpoint_camera, pcloud_cameras, patch=None, gamma=1.0):
    """Render viewpoint_camera's view by reprojecting the given point-cloud cameras.

    :param viewpoint_camera: target camera; its ``image`` is the ground truth and
        its width/height define the full-frame patch when ``patch`` is None.
    :param pcloud_cameras: non-empty list of source cameras providing point clouds.
    :param patch: (origin_x, origin_y, size_x, size_y) region to render, or None
        for the whole image.
    :param gamma: blending sharpness forwarded to ``render_patch``.
    :return: (neural-rendered image, per-camera color stack of shape
        (num_cams, -1, 3, H, W), mean per-camera L2 against the ground truth).
    """
    if patch is None:  # fix: compare to None with `is`, not `==`
        patch = (0, 0, viewpoint_camera.image_width, viewpoint_camera.image_height)
    patch_origin_x, patch_origin_y, patch_size_x, patch_size_y = patch
    if not pcloud_cameras:
        raise ValueError("pcloud_cameras must be non-empty")
    # The cropped ground truth is loop-invariant; compute it once
    # (assumes crop_image is deterministic — TODO confirm).
    gt_patch = crop_image(viewpoint_camera.image, patch)
    # Accumulate per-camera outputs in lists and concatenate once at the end,
    # instead of the original quadratic repeated torch.cat inside the loop.
    colors = []
    blends = []
    depth_gmms_list = []
    num_gmms_list = []
    l2_list = []
    for pcloud_cam in pcloud_cameras:
        rendered_point_cloud, depth_gmms, num_gmms, blend_scores = pcloud_cam.render_patch(
            viewpoint_camera=viewpoint_camera,
            patch_origin_x=patch_origin_x, patch_size_x=patch_size_x,
            patch_origin_y=patch_origin_y, patch_size_y=patch_size_y,
            gamma=gamma)
        colors.append(rendered_point_cloud)
        blends.append(blend_scores)
        depth_gmms_list.append(depth_gmms)
        num_gmms_list.append(num_gmms.int())
        # L2 between the first 3 rendered channels and the blend-weighted ground truth.
        l2_list.append(torch.nn.functional.mse_loss(
            rendered_point_cloud[:, :3, :, :], gt_patch * blend_scores).unsqueeze(0))
    features_stack = torch.cat(colors, dim=0)
    blend_scores_stack = torch.cat(blends, dim=0)
    depth_gmms_stack = torch.cat(depth_gmms_list, dim=0)
    num_gmms_stack = torch.cat(num_gmms_list, dim=0)
    l2_stack = torch.cat(l2_list, dim=0)
    # Group channels into 3-channel color layers: (num_cams, -1, 3, H, W).
    color_stack = features_stack.view(features_stack.shape[0], -1, 3,
                                      features_stack.shape[2], features_stack.shape[3])
    # Depth test weights are treated as constants (no gradient through them).
    with torch.no_grad():
        prob_map = _SoftDepthTest.apply(depth_gmms_stack, num_gmms_stack)
    image = neural_renderer(features_stack, prob_map * blend_scores_stack)
    return image, color_stack, l2_stack.mean()
old_f = sys.stdout
class F:
......@@ -26,44 +68,54 @@ torch.cuda.set_device(device)
torch.manual_seed(0)
random.seed(0)
scene = MultiScene(multi_scene_json="../../scenes/deep_blending/museum_perview.json",
max_radius=30, extra_features=3,
test_cameras=3)
viewpoint_cam = scene.scenes[0].getAllCameras()[0]
depth_gmms_stack = torch.tensor([]).to(device)
num_gmms_stack = torch.tensor([]).int().to(device)
view_list = [1, 2, 23,24]
for i, cam in enumerate([scene.scenes[0].getAllCameras()[idx] for idx in view_list]):
rasterized, depth_gmms, num_gmms, blend_score = cam.render(viewpoint_cam, 1.0, True)
depth_gmms_stack = torch.cat((depth_gmms_stack, depth_gmms), dim=0)
num_gmms_stack = torch.cat((num_gmms_stack, num_gmms.int()), dim=0)
print("")
torchvision.utils.save_image(rasterized[:, 0:3, :, :],
"F:/train_point_cloud_viewpoint_{}.png".format(i))
cv2.imwrite("F:/blend_score_{}.exr".format(i), blend_score.cpu().squeeze().detach().numpy())
asd=123
list_range = len(view_list)
"""
torch.cuda.synchronize()
time_begin = time.time()
prob_map = _SoftDepthTest.apply(list_depth_gmms[0], list_num_gmms[0],
list_depth_gmms[2], list_num_gmms[2])
torch.cuda.synchronize()
time_end = time.time()
cv2.imwrite("F:/prob_map_{}_{}.exr".format(0, 2), prob_map.cpu().detach().numpy())
cv2.imwrite("F:/list_num_gmms_2.exr".format(2), list_num_gmms[2].float().squeeze().cpu().detach().numpy())
cv2.imwrite("F:/list_num_gmms_0.exr".format(0), list_num_gmms[0].float().squeeze().cpu().detach().numpy())
print("Rast: {} | ".format(time_end - time_begin))
"""
parser = argparse.ArgumentParser(description='Train your network sailor.')
parser.add_argument('-i', '--input_path', required=False)
parser.add_argument('--load_iter', required=False, type=int, default=None)
parser.add_argument('--max_radius', required=False, type=int, default=8)
parser.add_argument('--test_cameras', required=False, default=3)
parser.add_argument('--extra_features', type=int, default=6)
args = parser.parse_args()
with torch.no_grad():
prob_map = _SoftDepthTest.apply(depth_gmms_stack, num_gmms_stack)
for i in range(list_range):
cv2.imwrite("F:/prob_map_{}.exr".format(i), prob_map[i,:,:,:].squeeze().cpu().detach().numpy())
device = torch.device("cuda:0")
torch.cuda.set_device(device)
torch.manual_seed(0)
random.seed(0)
args.input_path = "F:/gkopanas/pointbasedIBR/scenes/peter/gardenvase.json"
with open(args.input_path) as json_file:
input_json = json.load(json_file)
input_json["neural_weights_folder"] = r"F:\gkopanas\pointbasedIBR\tensorboard_3d_cluster\peter\gardenvase\neural_renderer"
input_json["scenes"][0]["scene_representation_folder"] = r"F:\gkopanas\pointbasedIBR\tensorboard_3d_cluster\peter\gardenvase\gardenvase"
neural_renderer = None
if input_json.get("neural_weights_folder"):
if args.load_iter:
load_iter = args.load_iter
else:
load_iter = searchForMaxIteration(input_json.get("neural_weights_folder"))
neural_renderer = torch.jit.load(os.path.join(input_json.get("neural_weights_folder"), "model_" + str(load_iter)))
scene = MultiScene(input_json,
args.max_radius, args.extra_features,
int(args.test_cameras), load_iter)
imgs = torch.tensor([]).to(device)
gts = torch.tensor([]).int().to(device)
for view_cam in scene.scenes[0].blacklist_cameras:
view_list = view_cam.neighbors
image, image_stack, _ = render_viewpoint(view_cam, scene.getPCloudCamsForScore(view_cam, sample=False, N=13), patch=None)
torchvision.utils.save_image(image,
"F:/out_{}.png".format(view_cam.image_name))
torchvision.utils.save_image(view_cam.image,
"F:/gt_{}.png".format(view_cam.image_name))
imgs = torch.cat((imgs, image), dim=0)
gts = torch.cat((gts, view_cam.image), dim=0)
print(psnr(imgs, gts))
asd=123
......@@ -86,7 +86,7 @@ parser.add_argument('--skip_validation', action='store_true', dest='skip_validat
parser.add_argument('--extra_features', type=int, default=6)
parser.add_argument('--max_radius', required=False, type=int, default=8)
parser.add_argument('--test_cameras', required=False, default=3)
parser.add_argument('--test_cameras', required=False, default=0)
parser.add_argument('--input_views', required=False, default=9)
......@@ -209,14 +209,15 @@ while True:
mkdir_p("./{}/neural_renderer/".format(tensorboard_folder))
neural_renderer.save("./{}/neural_renderer/model_{}".format(tensorboard_folder, iteration))
scene.save(tensorboard_folder, iteration)
if iteration%240==0 and not args.skip_validation:
if iteration%500==0 and not args.skip_validation:
torch.cuda.empty_cache()
total_loss = 0.0
total_L1 = 0.0
for test_viewpoint in scene.getAllTestCameras():
test_cam_list = scene.scenes[0].blacklist_cameras[:3]
for test_viewpoint in test_cam_list:
torch.cuda.empty_cache()
test_pcloud_cams = scene.getPCloudCamsForScore(test_viewpoint, sample=False)
test_pcloud_cams = scene.getPCloudCamsForScore(test_viewpoint, sample=False, N=12)
random.shuffle(test_pcloud_cams)
image, image_stack, _ = render_viewpoint(test_viewpoint, test_pcloud_cams, None)
gt_image = test_viewpoint.getImageHarmonized().to("cuda")
......@@ -224,8 +225,8 @@ while True:
Ll1 = l1_loss(image, gt_image)
loss = Ll1
total_loss += loss.item()/(len(scene.getAllTestCameras()))
total_L1 += Ll1.item()/(len(scene.getAllTestCameras()))
total_loss += loss.item()/(len(test_cam_list))
total_L1 += Ll1.item()/(len(test_cam_list))
tb_writer.add_images("test_neural_render_{}".format(test_viewpoint.image_name), torch.clamp(image, 0.0, 1.0),
global_step=iteration)
......
......@@ -73,13 +73,10 @@ namespace sibr {
if (preprocess) {
std::cout << "PreProcessing" << std::endl;
json j;
int w = 900;
int h = 600;
int w = 1297;
int h = 840;
for (int idx = 0; idx < cameras()->inputCameras().size(); idx++) {
const sibr::InputCamera::Ptr cam = cameras()->inputCameras()[idx];
if (!cam->isActive()) {
continue;
}
std::string image_path = myArgs.dataset_path.get() + "/pbnrScene/images/";
makeDirectory(image_path);
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment