diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8cbb4d2b738664b9a11d3bd8626892d664b55fc6..88398aa6013bb09e7e73e47eb8f94345bb258826 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,7 +1,7 @@
-project(sibr_gk_all)
+project(sibr_pbnr_all)
 
 add_subdirectory(apps)
 add_subdirectory(renderer)
 
 include(install_runtime)
-subdirectory_target(${PROJECT_NAME} ${CMAKE_CURRENT_LIST_DIR} "projects/gk")
+subdirectory_target(${PROJECT_NAME} ${CMAKE_CURRENT_LIST_DIR} "projects/pbnr")
diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt
index 75cadb16767dca596ea4fea7d6dbf1d4932e0572..cf3986e640cc5819f3587b9f9fa251ab4fd3ff50 100644
--- a/apps/CMakeLists.txt
+++ b/apps/CMakeLists.txt
@@ -9,7 +9,6 @@
 
 
 
-project(SIBR_gk_sibr_apps)
+project(SIBR_pbnr_apps)
 
-add_subdirectory(pytorch_impl/)
 add_subdirectory(opengl_impl/)
\ No newline at end of file
diff --git a/apps/opengl_impl/CMakeLists.txt b/apps/opengl_impl/CMakeLists.txt
index 2c09f723f2cbbb104da9e99b90402a825c7c7b99..4829f75e063d7e94a492f94aae31ae347ad1afa4 100644
--- a/apps/opengl_impl/CMakeLists.txt
+++ b/apps/opengl_impl/CMakeLists.txt
@@ -1,4 +1,4 @@
-project(SIBR_gk_sibr_opengl_impl)
+project(SIBR_pbnr_opengl_impl)
 
 file(GLOB SOURCES "*.cpp" "*.h" "*.hpp")
 source_group("Source Files" FILES ${SOURCES})
@@ -17,7 +17,7 @@ target_link_libraries(${PROJECT_NAME}
 	sibr_assets
 	sibr_ulr
 	sibr_renderer
-	sibr_gk
+	sibr_pbnr
 )
 
 include_directories(
@@ -27,7 +27,7 @@ include_directories(
 )
 
 # Define location in solution.
-set_target_properties(${PROJECT_NAME} PROPERTIES FOLDER "projects/gk/apps")
+set_target_properties(${PROJECT_NAME} PROPERTIES FOLDER "projects/pbnr/apps")
 
 
 
@@ -36,7 +36,7 @@ include(install_runtime)
 ibr_install_target(${PROJECT_NAME}
     INSTALL_PDB                         ## mean install also MSVC IDE *.pdb file (DEST according to target type)
 	RESOURCES  	${RESOURCES}
-	RSC_FOLDER 	"gk"
+	RSC_FOLDER 	"pbnr"
     STANDALONE  ${INSTALL_STANDALONE}   ## mean call install_runtime with bundle dependencies resolution
     COMPONENT   ${PROJECT_NAME}_install ## will create custom target to install only this project
 )
diff --git a/apps/opengl_impl/main.cpp b/apps/opengl_impl/main.cpp
index 655d113fe707fc37ae40f79c4fe57907b58071aa..eb8b4b798a63b1700d00075e60a94a57e09cab39 100644
--- a/apps/opengl_impl/main.cpp
+++ b/apps/opengl_impl/main.cpp
@@ -10,11 +10,11 @@
 #include <core/system/Utils.hpp>
 
 #include <projects/pointbased_neural_rendering/renderer/DeepLearningPointViewOGL.hpp>
-#include <projects/pointbased_neural_rendering/renderer/GkIBRScene.hpp>
+#include <projects/pointbased_neural_rendering/renderer/PbnrScene.hpp>
 #include "projects/torchgl_interop/renderer/torchgl_interop.h"
 
 #include <Windows.h>
-#define PROGRAM_NAME "gk_sibr_app"
+#define PROGRAM_NAME "pbnr_sibr_app"
 using namespace sibr;
 
 const char* usage = ""
@@ -32,7 +32,7 @@ int main(int ac, char** av) {
 	std::cout << std::setprecision(6);
 	// Parse Commad-line Args
 	CommandLineArgs::parseMainArgs(ac, av);
-	GkIBRAppArgs myArgs;
+	PbnrAppArgs myArgs;
 	myArgs.displayHelpIfRequired();
 	
 	// Window setup
@@ -45,7 +45,7 @@ int main(int ac, char** av) {
 	auto _outTex = std::make_shared<sibr::Texture2DRGB32F>(sibr::ImageRGB32F(neuralRenderResolutionT[0], neuralRenderResolutionT[1], sibr::Vector3f(1.0f, 0.7f, 0.7f)),  SIBR_GPU_LINEAR_SAMPLING);
 	auto _copyToOutTex = std::make_shared<CopyToTextureOp>(_outTex->handle());
 
-	GkIBRScene::Ptr scene(new GkIBRScene(myArgs, myArgs.preprocess_mode));
+	PbnrScene::Ptr scene(new PbnrScene(myArgs, myArgs.preprocess_mode));
 	if (myArgs.preprocess_mode) {
 		return 0;
 	}
diff --git a/apps/pytorch_impl/CMakeLists.txt b/apps/pytorch_impl/CMakeLists.txt
deleted file mode 100644
index 26d780233e380525bae04f69879060cf1a7446eb..0000000000000000000000000000000000000000
--- a/apps/pytorch_impl/CMakeLists.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-project(SIBR_gk_sibr_pytorch_impl)
-
-file(GLOB SOURCES "*.cpp" "*.h" "*.hpp")
-source_group("Source Files" FILES ${SOURCES})
-
-# Define build output for project
-add_executable(${PROJECT_NAME} ${SOURCES})
-
-# Define dependencies
-target_link_libraries(${PROJECT_NAME}
-	${Boost_LIBRARIES}
-	${ASSIMP_LIBRARIES}
-	${GLEW_LIBRARIES}
-	${OPENGL_LIBRARIES}
-    ${OpenCV_LIBRARIES}
-	sibr_view
-	sibr_assets
-	sibr_ulr
-	sibr_renderer
-	sibr_gk
-)
-
-include_directories(
-  ${TORCH_INCLUDE_DIRS}
-  ${TORCH_INCLUDE_BASE_DIRS}
-  ${CUDA_INCLUDE_DIRS}
-)
-
-# Define location in solution.
-set_target_properties(${PROJECT_NAME} PROPERTIES FOLDER "projects/gk/apps")
-
-
-
-## High level macro to install in an homogen way all our ibr targets
-include(install_runtime)
-ibr_install_target(${PROJECT_NAME}
-    INSTALL_PDB                         ## mean install also MSVC IDE *.pdb file (DEST according to target type)
-	RESOURCES  	${RESOURCES}
-	RSC_FOLDER 	"gk"
-    STANDALONE  ${INSTALL_STANDALONE}   ## mean call install_runtime with bundle dependencies resolution
-    COMPONENT   ${PROJECT_NAME}_install ## will create custom target to install only this project
-)
-
-
diff --git a/apps/pytorch_impl/main.cpp b/apps/pytorch_impl/main.cpp
deleted file mode 100644
index df45f9fe320e481c518b6189828152c471a57ef8..0000000000000000000000000000000000000000
--- a/apps/pytorch_impl/main.cpp
+++ /dev/null
@@ -1,156 +0,0 @@
-#include <fstream>
-
-#include <core/graphics/Window.hpp>
-#include <core/view/SceneDebugView.hpp>
-#include <core/view/MultiViewManager.hpp>
-#include <core/system/String.hpp>
-#include <core/renderer/DepthRenderer.hpp>
-#include <core/raycaster/Raycaster.hpp>
-#include <core/raycaster/CameraRaycaster.hpp>
-#include <core/system/Utils.hpp>
-
-#include <projects/pointbased_neural_rendering/renderer/DeepLearningPointView.hpp>
-#include <projects/pointbased_neural_rendering/renderer/GkIBRScene.hpp>
-#include "projects/torchgl_interop/renderer/torchgl_interop.h"
-
-#include <Windows.h>
-#define PROGRAM_NAME "gk_sibr_app"
-using namespace sibr;
-
-const char* usage = ""
-"Usage: " PROGRAM_NAME " -path <dataset-path>"    	                                "\n"
-;
-
-bool sortByDistance(sibr::InputCamera lhs, sibr::InputCamera rhs, Vector3f ref_point) {
-	float lhs_dist = (lhs.position() - ref_point).squaredNorm();
-	float rhs_dist = (rhs.position() - ref_point).squaredNorm();
-	return lhs_dist < rhs_dist;
-};
-
-int main(int ac, char** av) {
-	torch::NoGradGuard no_grad_guard;
-	std::cout << std::setprecision(6);
-	// Parse Commad-line Args
-	CommandLineArgs::parseMainArgs(ac, av);
-	GkIBRAppArgs myArgs;
-	myArgs.displayHelpIfRequired();
-	
-	// Window setup
-	sibr::Window window(PROGRAM_NAME, sibr::Vector2i(50, 50), myArgs);
-	sibr::Window::Ptr winptr;
-	winptr.reset(&window);
-	window.makeContextCurrent();
-		
-	Vector2u neuralRenderResolutionT(200, 200);
-	auto _outTex = std::make_shared<sibr::Texture2DRGB32F>(sibr::ImageRGB32F(neuralRenderResolutionT[0], neuralRenderResolutionT[1], sibr::Vector3f(1.0f, 0.7f, 0.7f)),  SIBR_GPU_LINEAR_SAMPLING);
-	auto _copyToOutTex = std::make_shared<CopyToTextureOp>(_outTex->handle());
-
-	GkIBRScene::Ptr scene(new GkIBRScene(myArgs, myArgs.preprocess_mode));
-	if (myArgs.preprocess_mode) {
-		return 0;
-	}
-
-	// Raycaster.
-	std::shared_ptr<sibr::Raycaster> raycaster = std::make_shared<sibr::Raycaster>();
-	raycaster->init();
-	raycaster->addMesh(scene->proxies()->proxy());
-
-	Vector2u neuralRenderResolution(900,
-	                                600);
-	Vector2u totalResolution;
-	if (myArgs.debug_mode) {
-		totalResolution[0] = 3 * neuralRenderResolution[0];
-		totalResolution[1] = 4 * neuralRenderResolution[1];
-	}
-	else {
-		totalResolution = neuralRenderResolution;
-	}
-
-	// Camera handler for main view.
-	sibr::InteractiveCameraHandler::Ptr generalCamera(new InteractiveCameraHandler());
-	generalCamera->setup(scene->cameras()->inputCameras(),
-		                 Viewport(0, 0, neuralRenderResolution[0], neuralRenderResolution[1]),
-		                 raycaster, {0.1, 100.0});
-
-	const std::shared_ptr<sibr::DeepLearningPointView>
-	    deepLearningPointView(new DeepLearningPointView(scene,
-	                                                    neuralRenderResolution,
-		                                                totalResolution,
-		                                                generalCamera,
-	                                                    myArgs.tensorboard_path.get() + "/neural_renderer/model_" + myArgs.iteration.get(),
-	                                                    myArgs.debug_mode));
-	// Add views to mvm.
-	MultiViewManager        multiViewManager(window, false);
-
-	multiViewManager.addIBRSubView("DL view", deepLearningPointView, totalResolution, ImGuiWindowFlags_ResizeFromAnySide);
-	multiViewManager.addCameraForView("DL view", generalCamera);
-
-
-	/*
-	std::cout << scene->exp_coef_mean << std::endl;
-	std::vector<torch::Tensor> harm_images_stack;
-	for (int idx = 0; idx < scene->cameras()->inputCameras().size(); idx++) {
-		std::tuple<torch::Tensor, torch::Tensor> packed_tensors = scene->_points3d_color_tuple[idx];
-		torch::Tensor image = std::get<1>(packed_tensors).view({ 1, 600, 900, 3 }).permute({ 0, 3, 1, 2 });
-		
-		std::cout << scene->_exp_coefs[idx] << std::endl;
-		torch::Tensor image_harmonized = image * scene->_exp_coefs[idx];
-		image_harmonized += image_harmonized * (1.0 - scene->exp_coef_mean);
-
-		ImageRGB32F image1;
-		image1 = tensorToIm(image_harmonized);
-		std::stringstream stream1;
-		stream1 << myArgs.dataset_path.get() + "/colmap/stereo/images_harmonized/" << scene->cameras()->inputCameras()[idx]->name();
-		image1.save(stream1.str());
-
-		harm_images_stack.push_back(image_harmonized);
-	}
-	torch::Tensor harmonized_grid = makeGrid(torch::TensorList(harm_images_stack), 5);
-
-	ImageRGB32F image1;
-	image1 = tensorToIm(harmonized_grid);
-	std::stringstream stream1;
-	stream1 << "F:/harmonized_grid.png";
-	image1.save(stream1.str());
-	*/
-	/*
-	for (int idx = 0; idx < scene->cameras()->inputCameras().size(); idx++) {
-		const sibr::InputCamera::Ptr cam = scene->cameras()->inputCameras()[idx];
-		std::cout << cam->name() << std::endl;
-		generalCamera->fromCamera(*cam, false, false);
-		window.makeContextCurrent();
-		multiViewManager.onUpdate(sibr::Input::global());
-		multiViewManager.onRender(window);
-		char title[80];
-		sprintf(title, 80, "title_cam_%s.png", cam->name().substr(0, cam->name().find_last_of(".")));
-		multiViewManager.captureView("DL view", "F:/", title);
-		window.swapBuffer();
-		CHECK_GL_ERROR;
-		std::cout << "--------------------" << std::endl;
-
-	}*/
-	if (myArgs.pathFile.get() != "") {
-		generalCamera->getCameraRecorder().loadPath(myArgs.pathFile.get(), neuralRenderResolution[0], neuralRenderResolution[1]);
-		generalCamera->getCameraRecorder().recordOfflinePath(myArgs.outPath, multiViewManager.getIBRSubView("DL view"), "dl");
-		if (!myArgs.noExit)
-			exit(0);
-	}
-
-	// Main looooooop.
-	while (window.isOpened()) {
-
-		sibr::Input::poll();
-		window.makeContextCurrent();
-		if (sibr::Input::global().key().isPressed(sibr::Key::Escape)) {
-			window.close();
-		}
-
-		multiViewManager.onUpdate(sibr::Input::global());
-		multiViewManager.onRender(window);
-
-		window.swapBuffer();
-		CHECK_GL_ERROR;
-	}
-
-	return EXIT_SUCCESS;
-}
diff --git a/pbnr_pytorch/diff_rasterization/rasterize_points.cu b/pbnr_pytorch/diff_rasterization/rasterize_points.cu
index 7c1df72208e9e57d36c0f073836abaffd459c3ae..d4ed86af609bf79d9b50f34107ac1a35dac5d4c2 100644
--- a/pbnr_pytorch/diff_rasterization/rasterize_points.cu
+++ b/pbnr_pytorch/diff_rasterization/rasterize_points.cu
@@ -1,5 +1,3 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
-
 #include <math.h>
 #include <torch/extension.h>
 #include <cstdio>
@@ -76,10 +74,10 @@ __device__ void CheckPixelInsidePoint(
 } // namespace
 
 // ****************************************************************************
-// *                          GK RASTERIZATION                             *
+// *                          PBNR RASTERIZATION                           *
 // ****************************************************************************
 
-__global__ void OrderPointsGKCudaKernel(
+__global__ void OrderPointsPbnrCudaKernel(
     int32_t* point_idxs, // (N, H, W, K)
     uint32_t* k_idxs,
     const float* points,
@@ -110,7 +108,7 @@ __global__ void OrderPointsGKCudaKernel(
   }
 }
 
-__global__ void BlendPointsGKCudaKernel(
+__global__ void BlendPointsPbnrCudaKernel(
     const float* points, // (P, 3)
     int32_t* point_idx, // (N, H, W, K)
     const float* colors, // (P, C)
@@ -251,7 +249,7 @@ __global__ void BlendPointsGKCudaKernel(
     }
 }
 
-__global__ void RasterizePointsGKCudaKernel(
+__global__ void RasterizePointsPbnrCudaKernel(
     const float* points, // (P, 3)
     const int P,
     uint32_t* k_idxs, // (N, H, W)
@@ -289,7 +287,7 @@ __global__ void RasterizePointsGKCudaKernel(
 }
 
 std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
-RasterizePointsGKCuda(
+RasterizePointsPbnrCuda(
     const torch::Tensor& points, // (P, 3)
     const torch::Tensor& colors, // (P, C)
     const torch::Tensor& point_score, // (P, C)
@@ -332,7 +330,7 @@ RasterizePointsGKCuda(
   const size_t blocks = 1024;
   const size_t threads = 64;
 
-  RasterizePointsGKCudaKernel<<<blocks, threads>>>(
+  RasterizePointsPbnrCudaKernel<<<blocks, threads>>>(
       points.contiguous().data<float>(),
       P,
       (unsigned int *)k_idxs.data<int32_t>(),
@@ -344,7 +342,7 @@ RasterizePointsGKCuda(
 
   cudaDeviceSynchronize();
 
-  BlendPointsGKCudaKernel<<<blocks, threads>>>(
+  BlendPointsPbnrCudaKernel<<<blocks, threads>>>(
       points.contiguous().data<float>(),
       point_idxs.contiguous().data<int32_t>(),
       colors.contiguous().data<float>(),
diff --git a/pbnr_pytorch/diff_rasterization/rasterize_points.h b/pbnr_pytorch/diff_rasterization/rasterize_points.h
index b463e31d7b2972a8b0c7fd62451554dd3728bf9e..f14e3bbe2db05df27b6dd5186ae8ffd3e010d4b8 100644
--- a/pbnr_pytorch/diff_rasterization/rasterize_points.h
+++ b/pbnr_pytorch/diff_rasterization/rasterize_points.h
@@ -6,7 +6,7 @@
 #include <tuple>
 
 std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
-RasterizePointsGKCuda(
+RasterizePointsPbnrCuda(
     const torch::Tensor& points,
     const torch::Tensor& colors,
     const torch::Tensor& point_score,
@@ -124,7 +124,7 @@ RasterizePoints(
     const float znear,
     const float gamma)
 {
-    return RasterizePointsGKCuda(
+    return RasterizePointsPbnrCuda(
         points,
         colors,
         point_score,
diff --git a/pbnr_pytorch/diff_rasterization/soft_depth_test.py b/pbnr_pytorch/diff_rasterization/soft_depth_test.py
index b4dbae677a76614f891d31e9a473c1e24bd9018d..95a1b5dd1d268f037e60a2f0a495396744c96aff 100644
--- a/pbnr_pytorch/diff_rasterization/soft_depth_test.py
+++ b/pbnr_pytorch/diff_rasterization/soft_depth_test.py
@@ -14,39 +14,3 @@ class _SoftDepthTest(torch.autograd.Function):
         )
         prob_map = _C.soft_depth_test(*args)
         return prob_map
-"""
-    @staticmethod
-    def backward(ctx, grad_idx, grad_out_color, grad_depth, grad_mask):
-        grad_points = None
-        grad_colors = None
-        grad_inv_cov = None
-        grad_max_radius = None
-        grad_image_height = None
-        grad_image_width = None
-        grad_points_per_pixel = None
-        grad_bin_size = None
-        grad_zfar = None
-        grad_znear = None
-        grad_gamma = None
-        znear = ctx.znear
-        zfar = ctx.zfar
-        gamma = ctx.gamma
-        max_radius = ctx.max_radius
-        points, colors, inv_cov, idx, k_idxs = ctx.saved_tensors
-        args = (points, colors, inv_cov, max_radius, idx, k_idxs, znear, zfar, gamma, grad_out_color)
-        grad_points, grad_colors, grad_inv_cov = _C.rasterize_points_backward(*args)
-        grads = (
-            grad_points,
-            grad_colors,
-            grad_inv_cov,
-            grad_max_radius,
-            grad_image_height,
-            grad_image_width,
-            grad_points_per_pixel,
-            grad_bin_size,
-            grad_zfar,
-            grad_znear,
-            grad_gamma
-        )
-        return grads
-"""
\ No newline at end of file
diff --git a/pbnr_pytorch/scene_loaders/ibr_scene.py b/pbnr_pytorch/scene_loaders/ibr_scene.py
index 8d5154168b7f3a2114c8d7a4a3777c0ff898b5a6..66bdf089e3832e44fef43deb23be130fb6e7b99c 100644
--- a/pbnr_pytorch/scene_loaders/ibr_scene.py
+++ b/pbnr_pytorch/scene_loaders/ibr_scene.py
@@ -365,19 +365,19 @@ class Scene():
             sys.stdout.write("Loading \"{}\" | Camera {}/{}".format(name, idx, len(scene_info.cameras)))
             sys.stdout.flush()
 
-            depthmap_path = os.path.join(path, "gkIBRScene/depth_maps_type_2/",
+            depthmap_path = os.path.join(path, "pbnrScene/depth_maps_type_2/",
                                          cam_info.image_name + ".ts")
             loaded_depthmap = None
             if os.path.exists(depthmap_path):
                 loaded_depthmap = torch.load(depthmap_path)
 
-            normalmap_path = os.path.join(path, "gkIBRScene/normal_maps/",
+            normalmap_path = os.path.join(path, "pbnrScene/normal_maps/",
                                          cam_info.image_name + ".ts")
             loaded_normalmap = None
             if os.path.exists(normalmap_path):
                 loaded_normalmap = torch.load(normalmap_path)
 
-            image_path = os.path.join(path, "gkIBRScene/images/",
+            image_path = os.path.join(path, "pbnrScene/images/",
                                       cam_info.image_name + ".ts")
             loaded_image = None
             if os.path.exists(image_path):
@@ -429,7 +429,7 @@ class Scene():
         for idx, cam in enumerate(self.cameras):
             self.map_imgfilename_to_idx[os.path.basename(cam.image_name)] = idx
 
-        neighbors_dict_path = os.path.join(path, "gkIBRScene/neighbors_dict.json")
+        neighbors_dict_path = os.path.join(path, "pbnrScene/neighbors_dict.json")
         if os.path.exists(neighbors_dict_path):
             with open(neighbors_dict_path) as json_file:
                 neighbors_dict = json.load(json_file)
diff --git a/pbnr_pytorch/scene_loaders/read_scenes_types.py b/pbnr_pytorch/scene_loaders/read_scenes_types.py
index 44e47e6c2490354bf42934de05b9743080c17d17..fe1a6c7f9a9b4132d7ac5f735d9cfad39d59d1c9 100644
--- a/pbnr_pytorch/scene_loaders/read_scenes_types.py
+++ b/pbnr_pytorch/scene_loaders/read_scenes_types.py
@@ -93,7 +93,7 @@ def readColmapSceneInfo(name, path):
         FovY = 2.0 * math.atan(0.5 * height / focal_length_y)
         FovX = 2.0 * math.atan(0.5 * width / focal_length_x)
 
-        image_path = os.path.join(path, "gkIBRScene/images", extr.name)
+        image_path = os.path.join(path, "pbnrScene/images", extr.name)
         image_name = os.path.basename(image_path).split(".")[0]
         image = Image.open(image_path)
         image = torch.from_numpy(np.array(image)) / 255.0
diff --git a/renderer/CMakeLists.txt b/renderer/CMakeLists.txt
index 31937a4572c876e5d2ebe2b75d2294aa97669d10..6f96285823074458a15a92c4524a54e68ac0c339 100644
--- a/renderer/CMakeLists.txt
+++ b/renderer/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(SIBR_PROJECT "gk")
+set(SIBR_PROJECT "pbnr")
 project(sibr_${SIBR_PROJECT})
 
 file(GLOB SOURCES "*.cpp" "*.h" "*.hpp")
@@ -46,7 +46,7 @@ target_link_libraries(${PROJECT_NAME}
 
 
 # Define export/import flag.
-add_definitions( -DSIBR_EXP_GK_SIBR_EXPORTS -DBOOST_ALL_DYN_LINK  )
+add_definitions( -DSIBR_EXP_PBNR_EXPORTS -DBOOST_ALL_DYN_LINK  )
 
 set_target_properties(${PROJECT_NAME} PROPERTIES FOLDER "projects/${SIBR_PROJECT}/renderer")
 
diff --git a/renderer/Config.hpp b/renderer/Config.hpp
index 2875677c7472e6347b39ffdce0123b3cd894fc02..4a1896d6f0f797bc0a575035f0ed3fd9b649d824 100644
--- a/renderer/Config.hpp
+++ b/renderer/Config.hpp
@@ -1,21 +1,20 @@
-#ifndef __SIBR_GK_SIBR_CONFIG_HPP__
-# define __SIBR_GK_SIBR_CONFIG_HPP__
+#pragma once
 
 # include <core/system/Config.hpp>
 # include <core/system/CommandLineArgs.hpp>
 
 # ifdef SIBR_OS_WINDOWS
-#  ifdef SIBR_STATIC_GK_SIBR_DEFINE
+#  ifdef SIBR_STATIC_DEFINE
 #    define SIBR_EXPORT
 #    define SIBR_NO_EXPORT
 #  else
-#    ifndef SIBR_EXP_GK_SIBR_EXPORT
-#      ifdef SIBR_EXP_GK_SIBR_EXPORTS
+#    ifndef SIBR_EXP_PBNR_EXPORT
+#      ifdef SIBR_EXP_PBNR_EXPORTS
 /* We are building this library */
-#        define SIBR_EXP_GK_SIBR_EXPORT __declspec(dllexport)
+#        define SIBR_EXP_PBNR_EXPORT __declspec(dllexport)
 #      else
 /* We are using this library */
-#        define SIBR_EXP_GK_SIBR_EXPORT __declspec(dllimport)
+#        define SIBR_EXP_PBNR_EXPORT __declspec(dllimport)
 #      endif
 #    endif
 #    ifndef SIBR_NO_EXPORT
@@ -23,13 +22,14 @@
 #    endif
 #  endif
 # else
-#  define SIBR_EXP_GK_SIBR_EXPORT
+#  define SIBR_EXP_PBNR_EXPORT
 # endif
 
+
 namespace sibr {
 
 	/// Arguments for all ULR applications.
-	struct GkIBRAppArgs :
+	struct PbnrAppArgs :
 		virtual BasicIBRAppArgs {
 		Arg<std::string> tensorboard_path = { "tensorboard_path", "" };
 		Arg<std::string> scene_name = { "scene_name", "" };
@@ -51,4 +51,3 @@ namespace sibr {
 
 }
 
-#endif  //__SIBR_EXP_GK_SIBR_CONFIG_HPP__
diff --git a/renderer/DeepLearningPointView.cpp b/renderer/DeepLearningPointView.cpp
deleted file mode 100644
index 4e156d33337f626e506b1453b1eed49824a0f8b9..0000000000000000000000000000000000000000
--- a/renderer/DeepLearningPointView.cpp
+++ /dev/null
@@ -1,415 +0,0 @@
-#include "DeepLearningPointView.hpp"
-#include "projects/torchgl_interop/renderer/torchgl_interop.h"
-#include "./p3d_rasterizer/rasterize_points.h"
-#include "./p3d_rasterizer/soft_depth_test.h"
-#include <torch/script.h>
-#include <math.h>
-#include <c10/cuda/CUDACachingAllocator.h>
-#include <core/graphics/GUI.hpp>
-#include "RenderingUtilities.hpp"
-
-
-namespace sibr
-{
-	DeepLearningPointView::DeepLearningPointView(const GkIBRScene::Ptr scene,
-		const Vector2u& neuralRenderResolution,
-		const Vector2u& totalResolution,
-		const InteractiveCameraHandler::Ptr& camHandle,
-		const std::string& model_path,
-		const int debug_mode) :
-		_scene(scene), _camHandle(camHandle), _debug_mode(debug_mode),
-		_totalResolution(totalResolution), _neuralRenderResolution(neuralRenderResolution)
-	{
-
-		try {
-			// Deserialize the ScriptModule from a file using torch::jit::load().
-			_neural_renderer = torch::jit::load(model_path);
-			_neural_renderer.to(torch::kCUDA);
-			std::cout << "Successfuly loaded model" << std::endl;
-		}
-		catch (const c10::Error& e) {
-			std::cerr << e.msg() << std::endl;
-			std::cerr << "error loading the model\n";
-			SIBR_ERR;
-		}
-
-		//_outTex.reset(new Texture2DRGB32F(sibr::ImageRGB32F(_totalResolution[0], _totalResolution[1]), SIBR_GPU_LINEAR_SAMPLING));
-		_outTex = std::make_shared<sibr::Texture2DRGB32F>(sibr::ImageRGB32F(_totalResolution[0], _totalResolution[1], sibr::Vector3f(1.0f, 0.7f, 0.7f)), SIBR_GPU_LINEAR_SAMPLING);
-		_copyToOutTex = std::make_shared<CopyToTextureOp>(_outTex->handle());
-
-		GLShader::Define::List defines;
-		defines.emplace_back("VERTICAL_FLIP", 1);
-
-		_textureShader.init("TextureShader",
-			sibr::loadFile(sibr::getShadersDirectory() + "/core/texture.vert", defines),
-			sibr::loadFile(sibr::getShadersDirectory() + "/core/texture.frag"));
-
-	}
-
-	torch::Tensor DeepLearningPointView::geom_transform_points(torch::Tensor& points,
-		torch::Tensor& transf_matrix)
-	{
-		torch::Tensor points_hom;
-		if (points.sizes()[1] == 3) {
-			torch::Tensor ones = torch::ones({ points.sizes()[0], 1 }, torch::TensorOptions().device(torch::kCUDA, 0));
-			points_hom = torch::cat(torch::TensorList({ points,  ones }), 1);
-		}
-		if (points.sizes()[1] == 4) {
-			points_hom = points;
-		}
-		torch::Tensor projected_points = torch::matmul(points_hom, transf_matrix);
-		projected_points = projected_points.slice(1, 0, 3) / projected_points.slice(1, 3, 4);
-
-
-		return projected_points;
-	}
-
-	torch::Tensor DeepLearningPointView::geom_transform_vectors(torch::Tensor& vectors,
-		torch::Tensor& transf_matrix)
-	{
-		torch::Tensor vectors_hom;
-		if (vectors.sizes()[1] == 3) {
-			torch::Tensor zeros = torch::zeros({ vectors.sizes()[0], 1 }, torch::TensorOptions().device(torch::kCUDA, 0));
-			vectors_hom = torch::cat(torch::TensorList({ vectors,  zeros }), 1);
-		}
-		if (vectors.sizes()[1] == 4) {
-			vectors_hom = vectors;
-		}
-		torch::Tensor projected_vectors = torch::matmul(vectors_hom, transf_matrix);
-
-		return projected_vectors.slice(1, 0, 3);
-	}
-
-	torch::Tensor DeepLearningPointView::computeJacobian(torch::Tensor& point_cloud,
-		torch::Tensor& normal,
-		const sibr::Camera::Ptr camera)
-	{
-		torch::Tensor s_world, t_world;
-		torch::Tensor o, s, t;
-
-		s_world = torch::ones_like(normal).cuda();
-		s_world.slice(1, 1, 2) = 0.0;
-		s_world.slice(1, 2, 3) = -normal.slice(1, 0, 1) / normal.slice(1, 2, 3);
-		s_world = s_world / s_world.norm(2, 1, true);
-
-		t_world = normal.cross(s_world);
-		t_world = t_world / t_world.norm(2, 1, true);
-
-		torch::Tensor world2view = getP3DWorld2ViewMat(camera).cuda();
-		torch::Tensor proj = getP3DProjMat(camera).cuda();
-
-		o = geom_transform_points(point_cloud, world2view);
-		s = geom_transform_vectors(s_world, world2view);
-		t = geom_transform_vectors(t_world, world2view);
-
-		torch::Tensor jacobian = torch::zeros({ o.sizes()[0], 2, 2 }).cuda();
-		jacobian.slice(1, 0, 1).slice(2, 0, 1) = (s.slice(1, 0, 1) * o.slice(1, 2, 3) - s.slice(1, 2, 3) * o.slice(1, 0, 1)).unsqueeze(1);
-		jacobian.slice(1, 0, 1).slice(2, 1, 2) = (t.slice(1, 0, 1) * o.slice(1, 2, 3) - t.slice(1, 2, 3) * o.slice(1, 0, 1)).unsqueeze(1);
-		jacobian.slice(1, 1, 2).slice(2, 0, 1) = (s.slice(1, 2, 3) * o.slice(1, 1, 2) - s.slice(1, 1, 2) * o.slice(1, 2, 3)).unsqueeze(1);
-		jacobian.slice(1, 1, 2).slice(2, 1, 2) = (t.slice(1, 2, 3) * o.slice(1, 1, 2) - t.slice(1, 1, 2) * o.slice(1, 2, 3)).unsqueeze(1);
-
-		jacobian = (torch::matmul(jacobian, proj.slice(0, 0, 2).slice(1, 0, 2))) / (o.slice(1, 2, 3) * o.slice(1, 2, 3)).unsqueeze(1);
-
-		return jacobian;
-	}
-
-	torch::Tensor DeepLearningPointView::computeInvertedCovariance(torch::Tensor& point_cloud,
-		torch::Tensor& normal,
-		torch::Tensor& uncertainty,
-		const sibr::Camera::Ptr in_camera,
-		const sibr::Camera::Ptr out_camera)
-	{
-		torch::Tensor out_view_jacobian = computeJacobian(point_cloud, normal, out_camera);
-		torch::Tensor in_view_jacobian = computeJacobian(point_cloud, normal, in_camera);
-
-		torch::Tensor full_jacobian = torch::bmm(in_view_jacobian.inverse(), out_view_jacobian);
-		torch::Tensor covariance = torch::bmm(full_jacobian, full_jacobian.transpose(1, 2));
-
-		torch::Tensor uncertainty_matrix = uncertainty.unsqueeze(1) *
-			torch::eye(2, torch::TensorOptions().device(torch::kCUDA, 0)).unsqueeze(0).repeat({ uncertainty.sizes()[0], 1, 1 });
-
-		torch::Tensor scaled_covariance = torch::bmm(covariance, uncertainty_matrix);
-
-		torch::Tensor inverted_covariance = scaled_covariance.inverse();
-		return inverted_covariance;
-	}
-
-	void DeepLearningPointView::onUpdate(Input& input, const Viewport& viewport)
-	{
-		ViewBase::onUpdate(input, viewport);
-	}
-
-	void DeepLearningPointView::projectRGBDCamera(int src_cam_idx, const sibr::Camera& eye, const Vector2u resolution,
-		torch::Tensor& image, torch::Tensor& depth_gmms, torch::Tensor& num_gmms) {
-
-		//src_cam_idx = 17;
-		//sibr::Camera::Ptr eye_override = _scene->cameras()->inputCameras()[1];
-		sibr::Camera::Ptr eye_override = std::make_shared<sibr::Camera>(eye);
-
-		std::tuple<torch::Tensor, torch::Tensor> packed_tensors = _scene->_points3d_color_tuple[src_cam_idx];
-		torch::Tensor point_cloud = std::get<0>(packed_tensors);
-		torch::Tensor point_colors = std::get<1>(packed_tensors) * _scene->_exp_coefs[src_cam_idx];
-		torch::Tensor normals = _scene->_normals[src_cam_idx] / _scene->_normals[src_cam_idx].norm(2, 1, true);
-
-		torch::Tensor pts_transformed = transformPointsToCamera(eye_override, point_cloud);
-
-		/*
-		filter_pos_x = torch.logical_and(viewspace_points[:, 0] < 1.0, viewspace_points[:, 0] > -1.0)
-		filter_pos_y = torch.logical_and(viewspace_points[:, 1] < 1.0, viewspace_points[:, 1] > -1.0)
-		filter_pos = torch.logical_and(filter_pos_x, filter_pos_y)*/
-
-		torch::Tensor in_cam_center = torch::tensor({ { _scene->cameras()->inputCameras()[src_cam_idx]->position().x(),
-														_scene->cameras()->inputCameras()[src_cam_idx]->position().y(),
-														_scene->cameras()->inputCameras()[src_cam_idx]->position().z() } }).cuda();
-		torch::Tensor out_cam_center = torch::tensor({ { eye_override->position().x(),
-														eye_override->position().y(),
-														eye_override->position().z() } }).cuda();
-
-
-		torch::Tensor view_in_ray = (point_cloud.slice(1, 0, 3) - in_cam_center) / (point_cloud.slice(1, 0, 3) - in_cam_center).norm(2, 1, true);
-		torch::Tensor cos_in_view = torch::abs(torch::bmm(normals.view({ -1, 1, 3 }), view_in_ray.view({ -1, 3, 1 })));
-
-		torch::Tensor view_out_ray = (point_cloud.slice(1, 0, 3) - out_cam_center) / (point_cloud.slice(1, 0, 3) - out_cam_center).norm(2, 1, true);
-		torch::Tensor cos_out_view = torch::abs(torch::bmm(normals.view({ -1, 1, 3 }), view_out_ray.view({ -1, 3, 1 })));
-
-		torch::Tensor filter_slanted = torch::logical_and(cos_in_view > 0.1, cos_out_view > 0.1).squeeze();
-
-
-		torch::Tensor inverted_covariance = computeInvertedCovariance(point_cloud.index({ { filter_slanted } }),
-			normals.index({ { filter_slanted } }),
-			_scene->_uncertainties[src_cam_idx].index({ { filter_slanted } }),
-			_scene->cameras()->inputCameras()[src_cam_idx],
-			eye_override);
-
-
-		torch::Tensor pts_features = torch::cat({ point_colors, _scene->_features[src_cam_idx] }, 1);
-		std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
-			rasterize_return_pack = RasterizePoints(pts_transformed.index({ { filter_slanted } }),
-				pts_features.index({ { filter_slanted } }),
-				inverted_covariance,
-				8, resolution[1], resolution[0], 50,
-				_scene->cameras()->inputCameras()[7]->zfar(), _scene->cameras()->inputCameras()[7]->znear(),
-				1.0);
-
-		image = std::get<1>(rasterize_return_pack);
-		depth_gmms = std::get<3>(rasterize_return_pack);
-		num_gmms = std::get<4>(rasterize_return_pack);
-	}
-
-	void DeepLearningPointView::onRenderIBR(sibr::IRenderTarget& dst, const sibr::Camera& eye)
-	{
-
-		std::chrono::steady_clock::time_point start, stop;
-		std::chrono::milliseconds duration;
-
-		torch::Tensor image, depth_gmms, num_gmms, final;
-		torch::Tensor w = torch::tensor({}).cuda();
-		std::vector<torch::Tensor> views_to_render;
-
-		cudaDeviceSynchronize();
-		start = std::chrono::high_resolution_clock::now();
-		std::vector<int> closest4cam;
-
-		int num_cameras = 8 + 1;
-		//closest4cam = getNClosestCameras(eye, 8);
-		//closest4cam = _scene->getNBestCoverageCamerasGPU(eye, _neuralRenderResolution, num_cameras, 1.0 / 4.0, true, -1);
-		//std::cout << closest4cam << std::endl;
-		closest4cam = _scene->getNBestCoverageCameras(eye, _neuralRenderResolution, num_cameras, 1.0/16.0, true, -1);
-		//std::cout << closest4cam << std::endl;
-
-		std::sort(closest4cam.begin(), closest4cam.end());
-
-		for (int idx = 0; idx < _scene->cameras()->inputCameras().size(); idx++) {
-			float alpha = 0.05;
-			float w = 0.0;
-			if (std::find(closest4cam.begin(), closest4cam.end(), idx) != closest4cam.end()) {
-				w = 1.0;
-			}
-			float new_w = alpha * w + (1.0 - alpha) * std::get<0>(_scene->_cam_weights_idx[idx]);
-			_scene->_cam_weights_idx[idx] = std::make_tuple(new_w, idx);
-		}
-
-		std::vector<std::tuple<float, int>> best_cams_sorted(num_cameras);
-		std::partial_sort_copy(std::begin(_scene->_cam_weights_idx), std::end(_scene->_cam_weights_idx), std::begin(best_cams_sorted), std::end(best_cams_sorted),
-			std::greater<std::tuple<float, int>>());
-
-		cudaDeviceSynchronize();
-		stop = std::chrono::high_resolution_clock::now();
-		duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
-		std::cout << "Time taken by choosing cameras: " << duration.count() << " milliseconds" << std::endl;
-
-
-		cudaDeviceSynchronize();
-		start = std::chrono::high_resolution_clock::now();
-		torch::Tensor features_stack = torch::tensor({}).cuda();
-		torch::Tensor depth_gmms_stack = torch::tensor({}).cuda();
-		torch::Tensor num_gmms_stack = torch::tensor({}, torch::TensorOptions().device(torch::kCUDA, 0).dtype(torch::kInt32));
-
-		for (std::tuple<float, int> cam_w_idx : best_cams_sorted) {
-			int dist_cam = std::get<1>(cam_w_idx);
-			float w_cam = std::get<0>(cam_w_idx);
-			projectRGBDCamera(dist_cam, eye, _neuralRenderResolution, image, depth_gmms, num_gmms);
-			image = torch::clamp(image, 0.0, 1.0);
-			//features_stack = torch.cat((features_stack, torch.cat((rendered_point_cloud, depth, mask), dim = 1)), dim = 0)
-			features_stack = torch::cat(torch::TensorList({ features_stack, image }), 0);
-			depth_gmms_stack = torch::cat(torch::TensorList({ depth_gmms_stack, depth_gmms }), 0);
-			num_gmms_stack = torch::cat(torch::TensorList({ num_gmms_stack, num_gmms }), 0);
-			w = torch::cat(torch::TensorList({ w, torch::tensor({w_cam}).unsqueeze(0).cuda() }), 0);
-
-			if (_debug_mode) views_to_render.push_back(image.index({ torch::indexing::Slice(),
-																	 torch::indexing::Slice(torch::indexing::None, 3),
-																	 torch::indexing::Slice(), torch::indexing::Slice() }));
-		}
-		w = w - (w.min() - 0.05);
-		w = w / w.sum();
-
-		cudaDeviceSynchronize();
-		stop = std::chrono::high_resolution_clock::now();
-		duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
-		std::cout << "Time taken by projecting: " << duration.count() << " milliseconds" << std::endl;
-
-
-		cudaDeviceSynchronize();
-		start = std::chrono::high_resolution_clock::now();
-
-
-		torch::Tensor prob_map;
-		prob_map = SoftDepthTest(depth_gmms_stack, num_gmms_stack);
-		//prob_map = torch::ones({ 9, 1, 600, 900 }, torch::TensorOptions().device(torch::kCUDA, 0));
-
-		/*
-		std::cout << prob_map.sizes() << std::endl;
-		for (int i = 0; i < best_cams_sorted.size(); i++) {
-			ImageRGB32F fake_image;
-			torch::Tensor tmp = prob_map.index({ i }).unsqueeze(0);
-			fake_image = tensorToIm(torch::cat({ tmp, tmp, tmp }, 0).unsqueeze(0));
-			std::stringstream stream;
-			stream << "F:/prob_" << i << ".exr";
-			cv::imwrite(stream.str(), fake_image.toOpenCV());
-
-			ImageRGB32F image;
-			std::cout << features_stack.sizes() << std::endl;
-			tmp = features_stack.index({ i }).slice(0, 0, 3).unsqueeze(0);
-			std::cout << tmp.sizes() << std::endl;
-			image = tensorToIm(tmp);
-			std::stringstream stream1;
-			stream1 << "F:/img_" << i << ".png";
-			image.save(stream1.str());
-
-		}
-
-
-		ImageRGB32F fake_image;
-		fake_image = tensorToIm(torch::cat({ prob_map.sum(0).unsqueeze(0).unsqueeze(0), prob_map.sum(0).unsqueeze(0).unsqueeze(0), prob_map.sum(0).unsqueeze(0).unsqueeze(0) }, 1));
-		std::stringstream stream;
-		stream << "F:/prob_sum.exr";
-		cv::imwrite(stream.str(), fake_image.toOpenCV()); 
-		
-		*/
-
-		w = prob_map * w.view({ -1, 1, 1, 1 });
-
-		cudaDeviceSynchronize();
-		stop = std::chrono::high_resolution_clock::now();
-		duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
-		std::cout << "Time taken by depth test: " << duration.count() << " milliseconds" << std::endl;
-
-
-		cudaDeviceSynchronize();
-		start = std::chrono::high_resolution_clock::now();
-		std::vector<torch::jit::IValue> run_inputNet;
-
-		run_inputNet.push_back(features_stack);
-		run_inputNet.push_back(w);
-
-		final = _neural_renderer.forward(run_inputNet).toTensor();
-		final += final * (1.0 - _scene->exp_coef_mean);
-		final = torch::clamp(final, 0.0, 1.0);
-
-		//at::cuda::THCCachingAllocator_emptyCache();
-		//c10::cuda::CUDACachingAllocator::emptyCache();
-		views_to_render.insert(views_to_render.begin(), final);
-		cudaDeviceSynchronize();
-		stop = std::chrono::high_resolution_clock::now();
-		duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
-		std::cout << "Time taken by model: " << duration.count() << " milliseconds" << std::endl;
-
-		cudaDeviceSynchronize();
-		start = std::chrono::high_resolution_clock::now();
-		_copyToOutTex->Compute(makeGrid(torch::TensorList(views_to_render), 3));
-		//_copyToOutTex->Compute(torch::ones({ 1,3,300,1350 }).to(torch::kCUDA));
-		renderTextureRGB(_outTex, dst);
-		stop = std::chrono::high_resolution_clock::now();
-		duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
-		std::cout << "Time taken by copy and render: " << duration.count() << " milliseconds" << std::endl;
-		std::cout << "=========================================" << std::endl;
-	}
-
-	std::vector<int> DeepLearningPointView::getNBestScoreCameras(const sibr::Camera& ref_cam, int N) {
-		std::vector<int> score_cam_vector;
-
-		const Vector3f ref_pos = ref_cam.position();
-		const Vector3f ref_lookAt = ref_cam.dir() + ref_cam.position();
-		float max_dist = _scene->getMaxDistanceCamera(ref_cam);
-		for (int i = 0; i < _scene->cameras()->inputCameras().size(); i++) {
-			sibr::InputCamera::Ptr other_cam = _scene->cameras()->inputCameras()[i];
-			const Vector3f other_pos = other_cam->position();
-			const Vector3f other_lookAt = other_cam->dir() + other_cam->position();
-			float angle = acos(ref_lookAt.dot(other_lookAt) / (ref_lookAt.norm() * other_lookAt.norm()));
-			float dist = (ref_pos - other_pos).norm();
-			float norm_dist = (dist * M_PI) / max_dist;
-			float penalty = angle + norm_dist;
-			score_cam_vector.push_back(i);
-		}
-		std::sort(score_cam_vector.begin(), score_cam_vector.end());
-		std::vector<int> topN(score_cam_vector.begin(), score_cam_vector.begin() + N);
-		return topN;
-	}
-
-	std::vector<int> DeepLearningPointView::getNClosestCameras(const sibr::Camera& ref_cam, int N) {
-		const Vector3f pos = ref_cam.position();
-		std::vector<std::tuple<float, int>> dist_cam_vector;
-		for (int i = 0; i < _scene->cameras()->inputCameras().size(); i++) {
-			sibr::InputCamera::Ptr cam = _scene->cameras()->inputCameras()[i];
-			dist_cam_vector.push_back(std::tuple<float, int>((pos - cam->position()).norm(), i));
-		}
-		std::sort(dist_cam_vector.begin(), dist_cam_vector.end());
-		std::vector<int> topN;
-		for (int i = 0; i < N; i++) {
-			topN.push_back(std::get<1>(dist_cam_vector[i]));
-		}
-		return topN;
-	}
-
-
-
-	void DeepLearningPointView::renderTextureRGB(sibr::Texture2DRGB32F::Ptr tex, IRenderTarget& dst)
-	{
-		// Bind and clear RT.
-		dst.bind();
-		glViewport(0, 0, dst.w(), dst.h());
-		glClearColor(0, 0, 0, 1);
-		glClearDepth(1.0);
-		glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
-
-		// Render the mesh from the current viewpoint, output positions.
-		_textureShader.begin();
-		glActiveTexture(GL_TEXTURE0);
-		glBindTexture(GL_TEXTURE_2D, tex->handle());
-		RenderUtility::renderScreenQuad();
-
-		_textureShader.end();
-		dst.unbind();
-	}
-
-	void DeepLearningPointView::onGUI()
-	{
-		std::string test = "GK_ULR_DL Settings";
-		if (ImGui::Begin(test.c_str())) {
-			if (ImGui::InputInt("Weights Mode", (int*)(&camera_selection_algo), 1, 1)) {
-				camera_selection_algo = std::max(0, std::min(2, camera_selection_algo));
-			}
-			//ImGui::Combo("Weights Mode", (int*)(&camera_selection_algo), "Distance\0Binary MC\0Weighted MC\0\0");
-		}
-	}
-}
diff --git a/renderer/DeepLearningPointView.hpp b/renderer/DeepLearningPointView.hpp
deleted file mode 100644
index bfa9288d657e8fe55e834cea1a86cfd5ecb5d7f7..0000000000000000000000000000000000000000
--- a/renderer/DeepLearningPointView.hpp
+++ /dev/null
@@ -1,69 +0,0 @@
-#pragma once
-#include "Config.hpp"
-#include <core/view/MultiMeshManager.hpp>
-#include "GkIBRScene.hpp"
-#include <core/graphics/Shader.hpp>
-#include <core/graphics/Texture.hpp>
-#include "torch/torch.h"
-#include "projects/torchgl_interop/renderer/CopyToTextureOp.h"
-
-namespace sibr
-{
-	class SIBR_EXP_GK_SIBR_EXPORT DeepLearningPointView : public ViewBase
-	{
-	public:
-		DeepLearningPointView(const GkIBRScene::Ptr scene,
-			const Vector2u& neuralRenderResolution,
-			const Vector2u& totalResolution, 
-			const InteractiveCameraHandler::Ptr& camHandle,
-			const std::string& model_path,
-			const int debug_mode);
-
-		virtual void onUpdate(Input& input, const Viewport& viewport);
-		virtual void onRenderIBR(sibr::IRenderTarget& dst, const sibr::Camera& eye);
-		void renderTextureRGB(sibr::Texture2DRGB32F::Ptr tex, IRenderTarget& dst);
-		void renderTextureRGB2(sibr::Texture2DRGB32F::Ptr tex, const sibr::Camera& eye,
-			IRenderTarget& dst);
-	
-		void onGUI() override;
-		int camera_selection_algo = 0;
-	public:
-
-
-	private:
-		torch::Tensor geom_transform_points(torch::Tensor& points,
-			                                torch::Tensor& transf_matrix);
-		torch::Tensor geom_transform_vectors(torch::Tensor& vectors,
-		                                    torch::Tensor& transf_matrix);
-		torch::Tensor computeJacobian(torch::Tensor& point_cloud,
-		                              torch::Tensor& normal,
-		                              const sibr::Camera::Ptr camera);
-
-		torch::Tensor computeInvertedCovariance(torch::Tensor& point_cloud,
-		                                      	torch::Tensor& normal,
-			                                    torch::Tensor& uncertainty,
-			                                    const sibr::Camera::Ptr in_camera,
-			                                    const sibr::Camera::Ptr out_camera);
-
-		std::vector<int> getNBestScoreCameras(const sibr::Camera& ref_cam, int N);
-		std::vector<int> getNClosestCameras(const sibr::Camera& ref_cam, int N);
-
-		void projectRGBDCamera(int src_cam_idx, const sibr::Camera& eye, const Vector2u resolution,
-			torch::Tensor& image, torch::Tensor& depth, torch::Tensor& mask);
-
-		torch::jit::script::Module _neural_renderer;
-		sibr::GkIBRScene::Ptr _scene;
-		sibr::ImageRGB32F image_test;
-		at::Tensor _output;
-		std::shared_ptr<CopyToTextureOp> _copyToOutTex;
-		Texture2DRGB32F::Ptr _outTex;
-		
-		GLShader _textureShader;
-
-		InteractiveCameraHandler::Ptr _camHandle;
-
-		int _debug_mode;
-		Vector2u _totalResolution;
-		Vector2u _neuralRenderResolution;
-	};
-}
\ No newline at end of file
diff --git a/renderer/DeepLearningPointViewOGL.cpp b/renderer/DeepLearningPointViewOGL.cpp
index 377cff9dc4678d5eeb966be255e974edc17b71c9..91ac63a7d5440dbf7d170d5bbac7a051207f01ae 100644
--- a/renderer/DeepLearningPointViewOGL.cpp
+++ b/renderer/DeepLearningPointViewOGL.cpp
@@ -7,7 +7,7 @@ namespace sibr
 {
 	DeepLearningPointViewOGL::DeepLearningPointViewOGL(
 		sibr::Window::Ptr window,
-		const GkIBRScene::Ptr scene,
+		const PbnrScene::Ptr scene,
 		const Vector2u& neuralRenderResolution,
 		const Vector2u& totalResolution,
 		const InteractiveCameraHandler::Ptr& camHandle,
@@ -176,8 +176,8 @@ namespace sibr
 	{
 		SIBR_LOG << "Re-loading shaders... ";
 		
-		const std::string shaderSrcPath = sibr::getBinDirectory() + "/../../src/projects/gk_sibr_project/renderer/shaders/";
-		const std::string shaderDstPath = sibr::getShadersDirectory() + "/gk/";
+		const std::string shaderSrcPath = sibr::getBinDirectory() + "/../../src/projects/pointbased_neural_rendering/renderer/shaders/";
+		const std::string shaderDstPath = sibr::getShadersDirectory() + "/pbnr/";
 		const auto files = sibr::listFiles(shaderSrcPath, false, false, { "vert", "frag", "geom" });
 		for (const auto& file : files) {
 			sibr::copyFile(shaderSrcPath + file, shaderDstPath + file, true);
@@ -187,26 +187,26 @@ namespace sibr
 		defines.emplace_back("IN_CAM_COUNT", _activeCamCount);
 
 		_depth_shader.init("DepthShader",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/proxy_depth.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/proxy_depth.frag"));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/proxy_depth.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/proxy_depth.frag"));
 		
 		_minmax_mip_shader.init("MinMaxMIPShader",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/vertex2D.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/minmax_mip.frag"));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/vertex2D.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/minmax_mip.frag"));
 
 		_ewa_point_shader.init("EWAPointShader",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/ewa_point.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/ewa_point.frag"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/ewa_point.geom", defines));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/ewa_point.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/ewa_point.frag"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/ewa_point.geom", defines));
 
 		_compositing_shader.init("CompositingShader",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/vertex2D.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/compositing.frag"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/compositing.geom"));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/vertex2D.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/compositing.frag"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/compositing.geom"));
 
 		_summary_shader.init("SummaryShader",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/vertex2D.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/summary.frag"));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/vertex2D.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/summary.frag"));
 
 		std::cout << "Done." << std::endl;
 	}
diff --git a/renderer/DeepLearningPointViewOGL.hpp b/renderer/DeepLearningPointViewOGL.hpp
index 52217481fda6577dd8eeefec2db3e07da56cc951..af1790925376dfd7613657c2da0b22261a1af1e0 100644
--- a/renderer/DeepLearningPointViewOGL.hpp
+++ b/renderer/DeepLearningPointViewOGL.hpp
@@ -1,7 +1,7 @@
 #pragma once
 #include "Config.hpp"
 #include <core/view/MultiMeshManager.hpp>
-#include "GkIBRScene.hpp"
+#include "PbnrScene.hpp"
 #include <core/graphics/Shader.hpp>
 #include <core/graphics/Texture.hpp>
 #include "torch/torch.h"
@@ -12,11 +12,11 @@
 
 namespace sibr
 {
-	class SIBR_EXP_GK_SIBR_EXPORT DeepLearningPointViewOGL : public ViewBase
+	class SIBR_EXP_PBNR_EXPORT DeepLearningPointViewOGL : public ViewBase
 	{
 	public:
 		DeepLearningPointViewOGL(sibr::Window::Ptr window,
-								 const GkIBRScene::Ptr scene,
+								 const PbnrScene::Ptr scene,
 		                         const Vector2u& neuralRenderResolution,
 		                         const Vector2u& totalResolution,
 		                         const InteractiveCameraHandler::Ptr& camHandle,
@@ -39,7 +39,7 @@ namespace sibr
 		sibr::Window::Ptr _window;
 
 		torch::jit::script::Module _neural_renderer;
-		sibr::GkIBRScene::Ptr _scene;
+		sibr::PbnrScene::Ptr _scene;
 		InteractiveCameraHandler::Ptr _camHandle;
 		Vector2u _totalResolution;
 		Vector2u _neuralRenderResolution;
diff --git a/renderer/GkIBRScene.cpp b/renderer/PbnrScene.cpp
similarity index 90%
rename from renderer/GkIBRScene.cpp
rename to renderer/PbnrScene.cpp
index 8c512ba8db8726b151cae9b4068b4a4c2a0da32a..e35740d628ddb0bb65bf4e7ee80197b0ef8d6cc8 100644
--- a/renderer/GkIBRScene.cpp
+++ b/renderer/PbnrScene.cpp
@@ -1,4 +1,4 @@
-#include "GkIBRScene.hpp"
+#include "PbnrScene.hpp"
 #include "json.hpp"
 #include "projects/torchgl_interop/renderer/torchgl_interop.h"
 #include "projects/torchgl_interop/renderer/torchgl_interop.h"
@@ -9,28 +9,28 @@
 using json = nlohmann::json;
 
 namespace sibr {
-	GkIBRScene::GkIBRScene() : BasicIBRScene() {
+	PbnrScene::PbnrScene() : BasicIBRScene() {
 	}
 
-	GkIBRScene::GkIBRScene(const GkIBRAppArgs& myArgs, bool preprocess)
+	PbnrScene::PbnrScene(const PbnrAppArgs& myArgs, bool preprocess)
 		: BasicIBRScene(myArgs, true, false)
 	{
 
 		position_depth_shader.init("ULRV3Depth",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/ulr_intersect_gk.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/ulr_intersect_gk.frag"));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/ulr_intersect_pbnr.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/ulr_intersect_pbnr.frag"));
 		position_depth_proj.init(position_depth_shader, "proj");
 		position_depth_world2view.init(position_depth_shader, "world2view");
 
 		normal_depth_shader.init("NormalDepth",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/normal_depth.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/normal_depth.frag"));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/normal_depth.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/normal_depth.frag"));
 		normal_depth_proj.init(normal_depth_shader, "proj");
 		normal_depth_world2view.init(normal_depth_shader, "world2view");
 
 		score_shader.init("score",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/repro.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/score.frag"));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/repro.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/score.frag"));
 		score_shader_sampled_proj.init(score_shader, "sampledCamProj");
 		score_shader_sampled_pos.init(score_shader, "sampledCamPos");
 		score_shader_sampled_dir.init(score_shader, "sampledCamDir");
@@ -40,12 +40,12 @@ namespace sibr {
 		GLShader::Define::List defines;
 		defines.emplace_back("NUM_CAMS", num_cams);
 		compare_shader.init("compare",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/repro.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/compare.frag",defines));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/repro.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/compare.frag",defines));
 		skid_id_uniform.init(compare_shader, "skip_id");
 		maxPool_shader.init("maxPool",
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/repro.vert"),
-			sibr::loadFile(sibr::getShadersDirectory() + "/gk/maxPool.frag"));
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/repro.vert"),
+			sibr::loadFile(sibr::getShadersDirectory() + "/pbnr/maxPool.frag"));
 		maxPool_shader_bestId.init(maxPool_shader, "bestImprId");
 
 		//Precompute 3d Pos textures for camera selection
@@ -68,7 +68,7 @@ namespace sibr {
 				const sibr::InputCamera::Ptr cam = cameras()->inputCameras()[idx];
 
 
-				std::string image_path = myArgs.dataset_path.get() + "/gkIBRScene/images/";
+				std::string image_path = myArgs.dataset_path.get() + "/pbnrScene/images/";
 				makeDirectory(image_path);
 
 				ImageRGB image = images()->inputImages()[idx]->resized(w, h);
@@ -86,7 +86,7 @@ namespace sibr {
 				//getNormalAndDepthMap(*cam, proxies()->proxy(), image.w(), image.h(), normal_and_depth);
 
 
-				std::string depth_map_path = myArgs.dataset_path.get() + "/gkIBRScene/depth_maps_type_2/";
+				std::string depth_map_path = myArgs.dataset_path.get() + "/pbnrScene/depth_maps_type_2/";
 				makeDirectory(depth_map_path);
 				saveTensor(normal_and_depth.slice(1, 3, 4),
 					depth_map_path + cam->name().substr(0, cam->name().find_last_of(".")) + ".ts");
@@ -96,7 +96,7 @@ namespace sibr {
 				depth_image = tensorToIm(torch::cat({ normalizeddepth ,normalizeddepth ,normalizeddepth }, 1));
 				cv::imwrite(depth_map_path + cam->name().substr(0, cam->name().find_last_of(".")) + ".png", depth_image.toOpenCV());
 
-				std::string normal_map_path = myArgs.dataset_path.get() + "/gkIBRScene/normal_maps/";
+				std::string normal_map_path = myArgs.dataset_path.get() + "/pbnrScene/normal_maps/";
 				makeDirectory(normal_map_path);
 				saveTensor(normal_and_depth.slice(1, 0, 3),
 					normal_map_path + cam->name().substr(0, cam->name().find_last_of(".")) + ".ts");
@@ -115,7 +115,7 @@ namespace sibr {
 				}
 				j[cam->name().substr(0, cam->name().find_last_of("."))] = neighbor_names;
 			}
-			std::ofstream o(myArgs.dataset_path.get() + "/gkIBRScene/neighbors_dict.json");
+			std::ofstream o(myArgs.dataset_path.get() + "/pbnrScene/neighbors_dict.json");
 			o << std::setw(4) << j << std::endl;
 			return;
 		}
@@ -132,7 +132,7 @@ namespace sibr {
 		std::string exp_coef_path      = myArgs.tensorboard_path.get() + "/" + myArgs.scene_name.get() + "/exposure_coef/iteration_" + myArgs.iteration.get() + "/";
 
 		readNormalMaps(normal_map_path);
-		readDepthMaps(myArgs.dataset_path.get() + "/gkIBRScene/depth_maps_type_2/", depth_delta_path);
+		readDepthMaps(myArgs.dataset_path.get() + "/pbnrScene/depth_maps_type_2/", depth_delta_path);
 		readImages(images_path);
 		readFeatureMaps(features_path);
 		readUncertainties(uncertainties_path);
@@ -158,7 +158,7 @@ namespace sibr {
 		}
 	}
 
-	torch::Tensor GkIBRScene::readTensorFromDisk(std::string path) {
+	torch::Tensor PbnrScene::readTensorFromDisk(std::string path) {
 		std::ifstream fin(path, std::ios::out | std::ios::binary);
 
 		// get length of file:
@@ -174,14 +174,14 @@ namespace sibr {
 		return x.toTensor();
 	}
 
-	void GkIBRScene::readImages(std::string images_path) {
+	void PbnrScene::readImages(std::string images_path) {
 		for (sibr::InputCamera::Ptr cam : cameras()->inputCameras()) {
 			torch::Tensor imagesTensor = readTensorFromDisk(images_path + cam->name().substr(0, cam->name().find_last_of(".")) + ".img");
 			_images.push_back(imagesTensor.cuda());
 		}
 	}
 
-	void GkIBRScene::readFeatureMaps(std::string features_path) {
+	void PbnrScene::readFeatureMaps(std::string features_path) {
 		if (directoryExists(features_path)) {
 			for (sibr::InputCamera::Ptr cam : cameras()->inputCameras()) {
 				torch::Tensor featuresTensor = readTensorFromDisk(features_path + cam->name().substr(0, cam->name().find_last_of(".")) + ".feat");
@@ -196,7 +196,7 @@ namespace sibr {
 		}
 	}
 
-	void GkIBRScene::readUncertainties(std::string uncertainties_path) {
+	void PbnrScene::readUncertainties(std::string uncertainties_path) {
 		for (sibr::InputCamera::Ptr cam : cameras()->inputCameras()) {
 			if (directoryExists(uncertainties_path)) {
 				torch::Tensor uncertainty = readTensorFromDisk(uncertainties_path + cam->name().substr(0, cam->name().find_last_of(".")) + ".uncertainty");
@@ -209,7 +209,7 @@ namespace sibr {
 		}
 	}
 
-	void GkIBRScene::readExpCoef(std::string exp_coef_path) {
+	void PbnrScene::readExpCoef(std::string exp_coef_path) {
 		for (sibr::InputCamera::Ptr cam : cameras()->inputCameras()) {
 			if (directoryExists(exp_coef_path)) {
 				torch::Tensor exp_coef = readTensorFromDisk(exp_coef_path + cam->name().substr(0, cam->name().find_last_of(".")) + ".exp_coef");
@@ -222,7 +222,7 @@ namespace sibr {
 		}
 	}
 
-	void GkIBRScene::readDepthMaps(std::string depth_map_path, std::string depth_delta_path) {
+	void PbnrScene::readDepthMaps(std::string depth_map_path, std::string depth_delta_path) {
 		for (sibr::InputCamera::Ptr cam : cameras()->inputCameras()) {
 			// Read depth map
 			std::string cam_filename = cam->name().substr(0, cam->name().find_last_of("."));
@@ -241,7 +241,7 @@ namespace sibr {
 		}
 	}
 
-	void GkIBRScene::readNormalMaps(std::string normal_maps_path) {
+	void PbnrScene::readNormalMaps(std::string normal_maps_path) {
 		for (sibr::InputCamera::Ptr cam : cameras()->inputCameras()) {
 			std::string cam_filename = cam->name().substr(0, cam->name().find_last_of("."));
 
@@ -257,7 +257,7 @@ namespace sibr {
 	}
 
 
-	void GkIBRScene::renderPixelPositionsNDC(void) {
+	void PbnrScene::renderPixelPositionsNDC(void) {
 		torch::Tensor x_axis, y_axis;
 		for (int idx = 0; idx < _images.size(); idx++) {
 			// Creates X and Y axises and adds half pixel to center them.
@@ -275,7 +275,7 @@ namespace sibr {
 		}
 	}
 
-	void GkIBRScene::get3DPositionAndDepthMap(const sibr::Camera& ref_cam, const int w, const int h,
+	void PbnrScene::get3DPositionAndDepthMap(const sibr::Camera& ref_cam, const int w, const int h,
 		torch::Tensor& out_renderedTensor) {
 		ImageRGBA32F renderedImage(w, h);
 		sibr::RenderTargetRGBA32F depthRT(w, h);
@@ -284,7 +284,7 @@ namespace sibr {
 		out_renderedTensor = imToTensor(renderedImage);
 	}
 
-	void GkIBRScene::getNormalAndDepthMap(const sibr::Camera& ref_cam, const sibr::Mesh& mesh, const int w, const int h,
+	void PbnrScene::getNormalAndDepthMap(const sibr::Camera& ref_cam, const sibr::Mesh& mesh, const int w, const int h,
 		torch::Tensor& out_renderedTensor) {
 		ImageRGBA32F renderedImage(w, h);
 		sibr::RenderTargetRGBA32F depthRT(w, h);
@@ -294,7 +294,7 @@ namespace sibr {
 	}
 
 	std::tuple < torch::Tensor, torch::Tensor >
-		GkIBRScene::get3DPointCloudFromCamera(static sibr::Camera::Ptr source_cam,
+		PbnrScene::get3DPointCloudFromCamera(static sibr::Camera::Ptr source_cam,
 			static torch::Tensor color,
 			static torch::Tensor depth,
 			static torch::Tensor pixelsNDC,
@@ -328,7 +328,7 @@ namespace sibr {
 		return std::make_tuple(points_wc.index({ { filter } }), points_color.index({ { filter } }));
 	}
 
-	float GkIBRScene::getNonZeroMin(const torch::Tensor t) {
+	float PbnrScene::getNonZeroMin(const torch::Tensor t) {
 		float* ptr = (float*)t.data_ptr();
 		float min = 100000.0;
 		// iterate through all elements
@@ -341,7 +341,7 @@ namespace sibr {
 		return min;
 	}
 
-	void GkIBRScene::renderDepthAtRT(const sibr::Mesh& mesh,
+	void PbnrScene::renderDepthAtRT(const sibr::Mesh& mesh,
 		const sibr::Camera& eye,
 		sibr::RenderTargetRGBA32F& rt)
 	{
@@ -364,7 +364,7 @@ namespace sibr {
 		rt.unbind();
 	}
 
-	void GkIBRScene::renderScoreAtRT(sibr::RenderTargetLum32F& score,
+	void PbnrScene::renderScoreAtRT(sibr::RenderTargetLum32F& score,
 		sibr::RenderTargetRGBA32F& current_pos3D,
 		const sibr::Camera& current_cam,
 		sibr::RenderTargetRGBA32F& sampled_pos3D,
@@ -395,7 +395,7 @@ namespace sibr {
 		score.unbind();
 	}
 
-	int GkIBRScene::renderBestImprovementAtRT(
+	int PbnrScene::renderBestImprovementAtRT(
 		sibr::RenderTargetLum32F& currentScore,
 		sibr::RenderTargetLum32F& newScore,
 		sibr::Texture2DArrayLum32F& individualScores,
@@ -509,7 +509,7 @@ namespace sibr {
 
 
 
-	void GkIBRScene::renderNormalDepthAtRT(const sibr::Mesh& mesh,
+	void PbnrScene::renderNormalDepthAtRT(const sibr::Mesh& mesh,
 		const sibr::Camera& eye,
 		sibr::RenderTargetRGBA32F& rt)
 	{
@@ -532,7 +532,7 @@ namespace sibr {
 		rt.unbind();
 	}
 
-	float GkIBRScene::getMaxDistanceCamera(const sibr::Camera& ref_cam) {
+	float PbnrScene::getMaxDistanceCamera(const sibr::Camera& ref_cam) {
 		float max_dist = 0.0;
 		for (int i = 0; i < cameras()->inputCameras().size(); i++) {
 			sibr::InputCamera::Ptr other_cam = cameras()->inputCameras()[i];
@@ -544,7 +544,7 @@ namespace sibr {
 		return max_dist;
 	}
 
-	std::vector<int> GkIBRScene::getNBestCoverageCameras(const sibr::Camera& ref_cam, Vector2u resolution, const int N,
+	std::vector<int> PbnrScene::getNBestCoverageCameras(const sibr::Camera& ref_cam, Vector2u resolution, const int N,
 		const float scale, const bool weighted, const int skip_id) {
 
 
@@ -680,7 +680,7 @@ namespace sibr {
 		return best_cams;
 	}
 
-	std::vector<int> GkIBRScene::getNBestCoverageCamerasGPU(const sibr::Camera& ref_cam, Vector2u resolution, const int N,
+	std::vector<int> PbnrScene::getNBestCoverageCamerasGPU(const sibr::Camera& ref_cam, Vector2u resolution, const int N,
 		const float scale, const bool weighted, const int skip_id) {
 
 		int width = resolution[0]*scale;
diff --git a/renderer/GkIBRScene.hpp b/renderer/PbnrScene.hpp
similarity index 95%
rename from renderer/GkIBRScene.hpp
rename to renderer/PbnrScene.hpp
index df50c0070143af5cff81d46c39b8819224786561..2e3f7eeff65fc80950233a7740d4451246e14683 100644
--- a/renderer/GkIBRScene.hpp
+++ b/renderer/PbnrScene.hpp
@@ -6,13 +6,13 @@
 
 namespace sibr
 {
-	class SIBR_EXP_GK_SIBR_EXPORT GkIBRScene : public BasicIBRScene
+	class SIBR_EXP_PBNR_EXPORT PbnrScene : public BasicIBRScene
 	{
 	public:
-		SIBR_CLASS_PTR(GkIBRScene);
+		SIBR_CLASS_PTR(PbnrScene);
 
-		GkIBRScene();
-		GkIBRScene(const GkIBRAppArgs& myArgs, bool preprocess = false);
+		PbnrScene();
+		PbnrScene(const PbnrAppArgs& myArgs, bool preprocess = false);
 
 	public:
 		float getMaxDistanceCamera(const sibr::Camera& ref_cam);
diff --git a/renderer/p3d_rasterizer/rasterize_points.cu b/renderer/p3d_rasterizer/rasterize_points.cu
index b883a8a9bf750320bb16c2d2bf87e299d81815d3..3d95b954e6eff6728e35b423ce5e87b1f879a49e 100644
--- a/renderer/p3d_rasterizer/rasterize_points.cu
+++ b/renderer/p3d_rasterizer/rasterize_points.cu
@@ -1,5 +1,3 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
-
 #include <math.h>
 #include <torch/torch.h>
 #include <cstdio>
@@ -35,10 +33,10 @@ namespace {
 }
 
 // ****************************************************************************
-// *                          GK RASTERIZATION                             *
+// *                          Pbnr RASTERIZATION                             *
 // ****************************************************************************
 
-__global__ void OrderPointsGKCudaKernel(
+__global__ void OrderPointsPbnrCudaKernel(
     int32_t* point_idxs, // (N, H, W, K)
     uint32_t* k_idxs,
     const float* points,
@@ -70,7 +68,7 @@ __global__ void OrderPointsGKCudaKernel(
 }
 
 
-__global__ void BlendPoints_adaptivebinning_GKCudaKernel(
+__global__ void BlendPoints_adaptivebinning_PbnrCudaKernel(
     const float* points, // (P, 3)
     int32_t* point_idx, // (N, H, W, K)
     const float* colors, // (P, C)
@@ -221,7 +219,7 @@ __global__ void BlendPoints_adaptivebinning_GKCudaKernel(
 }
 
 
-__global__ void BlendPointsGKCudaKernel(
+__global__ void BlendPointsPbnrCudaKernel(
     const float* points, // (P, 3)
     int32_t* point_idx, // (N, H, W, K)
     const float* colors, // (P, C)
@@ -363,7 +361,7 @@ __global__ void BlendPointsGKCudaKernel(
     }
 }
 
-__global__ void RasterizePointsGKCudaKernel(
+__global__ void RasterizePointsPbnrCudaKernel(
     const float* points, // (P, 3)
     const int P,
     uint32_t* k_idxs, // (N, H, W)
@@ -376,7 +374,7 @@ __global__ void RasterizePointsGKCudaKernel(
     // Simple version: One thread per output pixel
     const int num_threads = gridDim.x * blockDim.x;
     const int tid = blockDim.x * blockIdx.x + threadIdx.x;
-    // TODO gkopanas more than 1 batches?
+    // TODO Pbnropanas more than 1 batches?
     for (int i = tid; i < P; i += num_threads) {
         const float px_ndc = points[i * 3 + 0];
         const float py_ndc = points[i * 3 + 1];
@@ -401,7 +399,7 @@ __global__ void RasterizePointsGKCudaKernel(
 }
 
 std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
-RasterizePointsGKCuda(
+RasterizePointsPbnrCuda(
     const torch::Tensor& points, // (P, 3)
     const torch::Tensor& colors, // (P, C)
     const torch::Tensor& inv_cov, // (P, 4)
@@ -442,7 +440,7 @@ RasterizePointsGKCuda(
     const size_t blocks = 1024;
     const size_t threads = 64;
 
-    RasterizePointsGKCudaKernel << <blocks, threads >> > (
+    RasterizePointsPbnrCudaKernel << <blocks, threads >> > (
         points.contiguous().data<float>(),
         P,
         (unsigned int*)k_idxs.data<int32_t>(),
@@ -454,7 +452,7 @@ RasterizePointsGKCuda(
 
     cudaDeviceSynchronize();
 
-    BlendPointsGKCudaKernel << <blocks, threads >> > (
+    BlendPointsPbnrCudaKernel << <blocks, threads >> > (
         points.contiguous().data<float>(),
         point_idxs.contiguous().data<int32_t>(),
         colors.contiguous().data<float>(),
diff --git a/renderer/p3d_rasterizer/rasterize_points.h b/renderer/p3d_rasterizer/rasterize_points.h
index 7d90de3f8d8c9478be3a1ec8952a55a81313fdff..cf1ab398680c751abe274636578c478eff93b25b 100644
--- a/renderer/p3d_rasterizer/rasterize_points.h
+++ b/renderer/p3d_rasterizer/rasterize_points.h
@@ -1,5 +1,3 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
-
 #pragma once
 #include <torch/torch.h>
 #include <cstdio>
@@ -7,7 +5,7 @@
 
 
 std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
-RasterizePointsGKCuda(
+RasterizePointsPbnrCuda(
     const torch::Tensor& points,
     const torch::Tensor& colors,
     const torch::Tensor& inv_cov,
@@ -34,7 +32,7 @@ RasterizePoints(
     const float znear,
     const float gamma)
 {
-    return RasterizePointsGKCuda(
+    return RasterizePointsPbnrCuda(
         points,
         colors,
         inv_cov,
diff --git a/renderer/shaders/ulr_intersect_gk.frag b/renderer/shaders/ulr_intersect_pbnr.frag
similarity index 100%
rename from renderer/shaders/ulr_intersect_gk.frag
rename to renderer/shaders/ulr_intersect_pbnr.frag
diff --git a/renderer/shaders/ulr_intersect_gk.vert b/renderer/shaders/ulr_intersect_pbnr.vert
similarity index 100%
rename from renderer/shaders/ulr_intersect_gk.vert
rename to renderer/shaders/ulr_intersect_pbnr.vert