Project: SIBR / pointbased_neural_rendering

Commit 79f874a3, authored 3 years ago by KOPANAS Georgios
Parent: 1e721344

    simplify cmd line arguments
Showing 3 changed files with 71 additions and 84 deletions:

  pbnr_pytorch/scene_loaders/ibr_scene.py    58 additions, 68 deletions
  pbnr_pytorch/test_path.py                   5 additions,  6 deletions
  pbnr_pytorch/train_full_pipeline.py         8 additions, 10 deletions
pbnr_pytorch/scene_loaders/ibr_scene.py  +58 −68

@@ -196,7 +196,7 @@ class Camera(nn.Module):
         return points_wc, point_features
 
-    def render(self, viewpoint_camera, gamma, ewa=False):
+    def render(self, viewpoint_camera, gamma):
         #torch.cuda.synchronize()
         #time_begin = time.time()
@@ -248,77 +248,67 @@ class Camera(nn.Module):
         #torch.cuda.synchronize()
         #time_begin = time.time()
-        if ewa:
-            normal_map = self.normal_map.to("cuda")
-            normal_map = normal_map.view(3, normal_map.shape[2] * normal_map.shape[3]).permute(1, 0)
-            normal_map = normal_map / normal_map.norm(2, dim=1, keepdim=True)
-            filter_pos_x = torch.logical_and(viewspace_points[:, 0] < 1.0, viewspace_points[:, 0] > -1.0)
-            filter_pos_y = torch.logical_and(viewspace_points[:, 1] < 1.0, viewspace_points[:, 1] > -1.0)
-            filter_pos = torch.logical_and(filter_pos_x, filter_pos_y)
-            view_in_ray = (point_cloud - self.camera_center) / (point_cloud - self.camera_center).norm(dim=1, keepdim=True)
-            cos_in_view = torch.abs(torch.bmm(normal_map.view(-1, 1, 3), view_in_ray.view(-1, 3, 1)))
-            view_out_ray = (point_cloud - viewpoint_camera.camera_center) / (point_cloud - viewpoint_camera.camera_center).norm(dim=1, keepdim=True)
-            cos_out_view = torch.abs(torch.bmm(normal_map.view(-1, 1, 3), view_out_ray.view(-1, 3, 1)))
-            filter_slanted = torch.logical_and(cos_in_view > 0.1, cos_out_view > 0.1).squeeze()
-            filter = torch.logical_and(filter_pos, filter_slanted)
-            filtered_pc = point_cloud[filter]
-            filtered_normals = normal_map[filter]
-            filtered_uncertainty = uncertainty_map_scale_matrix[filter]
-            if filtered_pc.shape[0] == 0:
-                return (torch.zeros(1, features.shape[1], viewpoint_camera.image_height, viewpoint_camera.image_width, device="cuda"),
-                        torch.cat([10000.0 * torch.ones((1, viewpoint_camera.image_height, viewpoint_camera.image_width, 102, 1), device="cuda"),
-                                   torch.ones((1, viewpoint_camera.image_height, viewpoint_camera.image_width, 102, 1), device="cuda")], dim=-1),
-                        torch.ones((1, viewpoint_camera.image_height, viewpoint_camera.image_width, 1)).cuda(),
-                        torch.zeros(1, 1, viewpoint_camera.image_height, viewpoint_camera.image_width, device="cuda"))
-            covariance = self.computeCovariance(filtered_pc, filtered_normals, viewpoint_camera)
-            scaled_covariance = torch.bmm(covariance, filtered_uncertainty)
-            with torch.no_grad():
-                eigenval = np.linalg.eigvals(scaled_covariance.cpu())
-                distortion = eigenval.min(axis=1) / eigenval.max(axis=1)
-                dist_torch = torch.tensor(distortion).cuda()
-                ninetinth = np.percentile(eigenval.flatten(), 95)
-                pixel_sigma = int(np.ceil(3 * np.sqrt(ninetinth)))
-            inv_cov = scaled_covariance.inverse()
-            #torch.cuda.synchronize()
-            #time_end = time.time()
-            #print("Cov: {} | ".format(time_end - time_begin), end="")
-            #torch.cuda.synchronize()
-            #time_begin = time.time()
-            col_image, depth_gmms, num_gmms, mask = rasterizer(viewspace_points[filter], features[filter], dist_torch, inv_cov, pixel_sigma)
-            #torch.cuda.synchronize()
-            #time_end = time.time()
-            #print("Rast: {} | ".format(time_end - time_begin), end="")
-        else:
-            inv_cov = uncertainty_map_scale_matrix * (1.0 / (0.66 * 0.66))
-            col_image, depth_gmms, num_gmms, mask = rasterizer(viewspace_points, features, inv_cov, self.max_radius)
-            #torch.cuda.synchronize()
-            #time_end = time.time()
-            #print("Rast: {} | ".format(time_end - time_begin), end="")
+        normal_map = self.normal_map.to("cuda")
+        normal_map = normal_map.view(3, normal_map.shape[2] * normal_map.shape[3]).permute(1, 0)
+        normal_map = normal_map / normal_map.norm(2, dim=1, keepdim=True)
+        filter_pos_x = torch.logical_and(viewspace_points[:, 0] < 1.0, viewspace_points[:, 0] > -1.0)
+        filter_pos_y = torch.logical_and(viewspace_points[:, 1] < 1.0, viewspace_points[:, 1] > -1.0)
+        filter_pos = torch.logical_and(filter_pos_x, filter_pos_y)
+        view_in_ray = (point_cloud - self.camera_center) / (point_cloud - self.camera_center).norm(dim=1, keepdim=True)
+        cos_in_view = torch.abs(torch.bmm(normal_map.view(-1, 1, 3), view_in_ray.view(-1, 3, 1)))
+        view_out_ray = (point_cloud - viewpoint_camera.camera_center) / (point_cloud - viewpoint_camera.camera_center).norm(dim=1, keepdim=True)
+        cos_out_view = torch.abs(torch.bmm(normal_map.view(-1, 1, 3), view_out_ray.view(-1, 3, 1)))
+        filter_slanted = torch.logical_and(cos_in_view > 0.1, cos_out_view > 0.1).squeeze()
+        filter = torch.logical_and(filter_pos, filter_slanted)
+        filtered_pc = point_cloud[filter]
+        filtered_normals = normal_map[filter]
+        filtered_uncertainty = uncertainty_map_scale_matrix[filter]
+        if filtered_pc.shape[0] == 0:
+            return (torch.zeros(1, features.shape[1], viewpoint_camera.image_height, viewpoint_camera.image_width, device="cuda"),
+                    torch.cat([10000.0 * torch.ones((1, viewpoint_camera.image_height, viewpoint_camera.image_width, 102, 1), device="cuda"),
+                               torch.ones((1, viewpoint_camera.image_height, viewpoint_camera.image_width, 102, 1), device="cuda")], dim=-1),
+                    torch.ones((1, viewpoint_camera.image_height, viewpoint_camera.image_width, 1)).cuda(),
+                    torch.zeros(1, 1, viewpoint_camera.image_height, viewpoint_camera.image_width, device="cuda"))
+        covariance = self.computeCovariance(filtered_pc, filtered_normals, viewpoint_camera)
+        scaled_covariance = torch.bmm(covariance, filtered_uncertainty)
+        with torch.no_grad():
+            eigenval = np.linalg.eigvals(scaled_covariance.cpu())
+            distortion = eigenval.min(axis=1) / eigenval.max(axis=1)
+            dist_torch = torch.tensor(distortion).cuda()
+            ninetinth = np.percentile(eigenval.flatten(), 95)
+            pixel_sigma = int(np.ceil(3 * np.sqrt(ninetinth)))
+        inv_cov = scaled_covariance.inverse()
+        #torch.cuda.synchronize()
+        #time_end = time.time()
+        #print("Cov: {} | ".format(time_end - time_begin), end="")
+        #torch.cuda.synchronize()
+        #time_begin = time.time()
+        col_image, depth_gmms, num_gmms, mask = rasterizer(viewspace_points[filter], features[filter], dist_torch, inv_cov, pixel_sigma)
+        #torch.cuda.synchronize()
+        #time_end = time.time()
+        #print("Rast: {} | ".format(time_end - time_begin), end="")
         return col_image, depth_gmms, num_gmms, mask
 
-    def render_patch(self, viewpoint_camera, patch_size_y, patch_size_x, patch_origin_x, patch_origin_y, gamma, ewa=False):
+    def render_patch(self, viewpoint_camera, patch_size_y, patch_size_x, patch_origin_x, patch_origin_y, gamma):
         pixel_size_y = (2 * math.tan(viewpoint_camera.FoVy / 2)) / viewpoint_camera.image_height
         pixel_size_x = (2 * math.tan(viewpoint_camera.FoVx / 2)) / viewpoint_camera.image_width
         cx = viewpoint_camera.image_width / 2
@@ -334,7 +324,7 @@ class Camera(nn.Module):
             zfar=viewpoint_camera.zfar,
             image_height=patch_size_y,
             image_width=patch_size_x)
-        return self.render(viewpoint_camera=camera_for_patch, gamma=gamma, ewa=ewa)
+        return self.render(viewpoint_camera=camera_for_patch, gamma=gamma)
 
 class Scene():
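For readers skimming the diff: the render() path this commit keeps culls points in two stages before splatting. First an NDC frustum test on the projected coordinates, then a slant test that rejects points whose normal is nearly perpendicular to the ray from either the source camera (self.camera_center) or the target camera. Below is a minimal standalone sketch of that culling step; cull_points is a hypothetical name and the (N, 3) tensor layout an assumption, while the 0.1 cosine threshold and the logic follow the diff.

import torch

def cull_points(viewspace_points, point_cloud, normals, src_center, dst_center,
                cos_thresh=0.1):
    # NDC frustum test: keep points whose projected x and y lie in (-1, 1).
    in_x = torch.logical_and(viewspace_points[:, 0] < 1.0, viewspace_points[:, 0] > -1.0)
    in_y = torch.logical_and(viewspace_points[:, 1] < 1.0, viewspace_points[:, 1] > -1.0)
    in_frustum = torch.logical_and(in_x, in_y)

    # Unit rays from each camera center to every point, shape (N, 3).
    ray_in = point_cloud - src_center
    ray_in = ray_in / ray_in.norm(dim=1, keepdim=True)
    ray_out = point_cloud - dst_center
    ray_out = ray_out / ray_out.norm(dim=1, keepdim=True)

    # normals are assumed unit-length (the diff normalizes normal_map first).
    # |n . r| near 0 means the surface is seen almost edge-on; such splats are
    # unreliable, so the cosine against BOTH cameras must clear the threshold.
    cos_in = (normals * ray_in).sum(dim=1).abs()
    cos_out = (normals * ray_out).sum(dim=1).abs()
    not_slanted = torch.logical_and(cos_in > cos_thresh, cos_out > cos_thresh)

    # Boolean mask, used like point_cloud[mask] / normals[mask] in the diff.
    return torch.logical_and(in_frustum, not_slanted)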
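The kept path also sizes the rasterizer's footprint from the splat covariances: a per-splat anisotropy score (the min/max eigenvalue ratio) and a single pixel_sigma radius taken as 3 sigma of the 95th-percentile eigenvalue. Here is a sketch of just that computation, assuming scaled_covariance is an (N, 2, 2) batch of screen-space covariances; its construction (self.computeCovariance) is not shown in this diff, and splat_footprint is a hypothetical wrapper name.

import numpy as np
import torch

def splat_footprint(scaled_covariance):
    # scaled_covariance: (N, 2, 2) screen-space covariance per splat (assumed).
    with torch.no_grad():
        # Eigenvalues of each 2x2 covariance are the squared extents of the
        # splat along its principal axes; symmetric PSD input keeps them real.
        eigenval = np.linalg.eigvals(scaled_covariance.cpu().numpy())
        # Per-splat anisotropy: 1.0 is a round splat, values near 0 a sliver.
        distortion = eigenval.min(axis=1) / eigenval.max(axis=1)
        dist_torch = torch.tensor(distortion).cuda()  # the diff keeps this on GPU
        # One conservative rasterization radius for the whole frame:
        # 3 sigma of the 95th-percentile eigenvalue covers nearly every splat.
        ninety_fifth = np.percentile(eigenval.flatten(), 95)
        pixel_sigma = int(np.ceil(3 * np.sqrt(ninety_fifth)))
    return dist_torch, pixel_sigma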
pbnr_pytorch/test_path.py  +5 −6

@@ -11,7 +11,7 @@ import json
 import numpy as np
 import math
 
-def render_viewpoint2(viewpoint_camera, pcloud_cameras, patch=None, ewa=False, gamma=1.0):
+def render_viewpoint2(viewpoint_camera, pcloud_cameras, patch=None, gamma=1.0):
     if patch == None:
         patch = (0, 0, viewpoint_camera.image_width, viewpoint_camera.image_height)
     patch_origin_x, patch_origin_y, patch_size_x, patch_size_y = patch
@@ -25,7 +25,7 @@ def render_viewpoint2(viewpoint_camera, pcloud_cameras, patch=None, ewa=False, g
         rendered_point_cloud, depth_gmms, num_gmms, blend_scores = pcloud_cam.render_patch(viewpoint_camera=viewpoint_camera,
                                                                                            patch_origin_x=patch_origin_x, patch_size_x=patch_size_x,
                                                                                            patch_origin_y=patch_origin_y, patch_size_y=patch_size_y,
-                                                                                           gamma=gamma, ewa=ewa)
+                                                                                           gamma=gamma)
         color_stack = torch.cat((color_stack, rendered_point_cloud), dim=0)
         blend_scores_stack = torch.cat((blend_scores_stack, blend_scores), dim=0)
         features_stack = torch.cat((features_stack, rendered_point_cloud), dim=0)
@@ -40,7 +40,7 @@ def render_viewpoint2(viewpoint_camera, pcloud_cameras, patch=None, ewa=False, g
     image = neural_renderer(features_stack, prob_map * blend_scores_stack)
     return image, color_stack, l2_stack.mean()
 
-def render_viewpoint(viewpoint_camera, pcloud_cameras, patch=None, ewa=False, gamma=1.0):
+def render_viewpoint(viewpoint_camera, pcloud_cameras, patch=None, gamma=1.0):
     print("render_viewpoint")
     print(viewpoint_camera.projection_matrix)
     features_stack = torch.tensor([]).to(device)
@@ -51,7 +51,7 @@ def render_viewpoint(viewpoint_camera, pcloud_cameras, patch=None, ewa=False, ga
     blend_scores_stack = torch.tensor([]).to(device)
     for idx, pcloud_cam in enumerate(pcloud_cameras):
-        rendered_point_cloud, depth_gmms, num_gmms, blend_scores = pcloud_cam.render(viewpoint_camera=viewpoint_camera, gamma=gamma, ewa=ewa)
+        rendered_point_cloud, depth_gmms, num_gmms, blend_scores = pcloud_cam.render(viewpoint_camera=viewpoint_camera, gamma=gamma)
         color_stack = torch.cat((color_stack, rendered_point_cloud), dim=0)
         blend_scores_stack = torch.cat((blend_scores_stack, blend_scores), dim=0)
         features_stack = torch.cat((features_stack, rendered_point_cloud), dim=0)
@@ -121,7 +121,6 @@ parser.add_argument('--load_iter', required=False, type=int, default=None)
 parser.add_argument('--max_radius', required=False, type=int, default=8)
 parser.add_argument('--test_cameras', required=False, default=3)
 parser.add_argument('--extra_features', type=int, default=6)
-parser.add_argument('--sample_views', action='store_true', dest='sample_views')
 args = parser.parse_args()
@@ -150,7 +149,7 @@ with torch.no_grad():
     train_viewpoint = scene.getAllTrainCameras()[2]
-    train_pcloud_cams = scene.getPCloudCamsForScore(train_viewpoint, args.sample_views)
+    train_pcloud_cams = scene.getPCloudCamsForScore(train_viewpoint, False)
     image, image_stack, _ = render_viewpoint(cameras_path[0], train_pcloud_cams, None, True)
     print("path")
     print(cameras_path[0].world_view_transform)
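With the ewa keyword gone, both render_viewpoint variants reduce to the same pattern: call Camera.render (or render_patch) once per input point-cloud camera and concatenate the per-view outputs along dim 0 for the neural renderer. A self-contained paraphrase of that accumulation follows, with the repo's Camera stubbed out; only the simplified call signature and the torch.cat stacking mirror the diff, and accumulate_views is a hypothetical name.

import torch

class StubPCloudCamera:
    # Stand-in for the repo's Camera: render() returns a fake (1, C, H, W)
    # feature image plus per-pixel blend scores, shaped like the real outputs.
    def render(self, viewpoint_camera, gamma):
        rendered = torch.rand(1, 9, 64, 64)
        blend_scores = torch.rand(1, 1, 64, 64)
        return rendered, None, None, blend_scores  # depth_gmms/num_gmms unused here

def accumulate_views(viewpoint_camera, pcloud_cameras, gamma=1.0):
    features_stack = torch.tensor([])
    blend_scores_stack = torch.tensor([])
    for pcloud_cam in pcloud_cameras:
        rendered, _, _, blend_scores = pcloud_cam.render(
            viewpoint_camera=viewpoint_camera, gamma=gamma)  # no ewa flag anymore
        # One entry per input view, concatenated along dim 0 for the neural renderer.
        features_stack = torch.cat((features_stack, rendered), dim=0)
        blend_scores_stack = torch.cat((blend_scores_stack, blend_scores), dim=0)
    return features_stack, blend_scores_stack

features, scores = accumulate_views(None, [StubPCloudCamera() for _ in range(3)])
assert features.shape == (3, 9, 64, 64)  # three input views stacked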
pbnr_pytorch/train_full_pipeline.py  +8 −10

@@ -30,7 +30,7 @@ class F:
 sys.stdout = F()
 
-def render_viewpoint(viewpoint_camera, pcloud_cameras, patch=None, ewa=False, gamma=1.0):
+def render_viewpoint(viewpoint_camera, pcloud_cameras, patch=None, gamma=1.0):
     if patch == None:
         patch = (0, 0, viewpoint_camera.image_width, viewpoint_camera.image_height)
     patch_origin_x, patch_origin_y, patch_size_x, patch_size_y = patch
@@ -44,7 +44,7 @@ def render_viewpoint(viewpoint_camera, pcloud_cameras, patch=None, ewa=False, ga
         rendered_point_cloud, depth_gmms, num_gmms, blend_scores = pcloud_cam.render_patch(viewpoint_camera=viewpoint_camera,
                                                                                            patch_origin_x=patch_origin_x, patch_size_x=patch_size_x,
                                                                                            patch_origin_y=patch_origin_y, patch_size_y=patch_size_y,
-                                                                                           gamma=gamma, ewa=ewa)
+                                                                                           gamma=gamma)
         color_stack = torch.cat((color_stack, rendered_point_cloud), dim=0)
         blend_scores_stack = torch.cat((blend_scores_stack, blend_scores), dim=0)
         features_stack = torch.cat((features_stack, rendered_point_cloud), dim=0)
@@ -86,10 +86,8 @@ parser.add_argument('--skip_validation', action='store_true', dest='skip_validat
 parser.add_argument('--extra_features', type=int, default=6)
 parser.add_argument('--max_radius', required=False, type=int, default=8)
-parser.add_argument('--sample_views', action='store_true', dest='sample_views')
 parser.add_argument('--test_cameras', required=False, default=3)
-parser.add_argument('--ewa', action='store_true', dest='ewa')
 parser.add_argument('--input_views', required=False, default=9)
 args = parser.parse_args()
@@ -161,7 +159,7 @@ while True:
     viewpoint_idx = randint(0, len(viewpoint_stack) - 1)
     viewpoint_cam = viewpoint_stack.pop(viewpoint_idx)
-    pcloud_cams = scene.getPCloudCamsForScore(viewpoint_cam, args.sample_views)
+    pcloud_cams = scene.getPCloudCamsForScore(viewpoint_cam, sample=True)
     random.shuffle(pcloud_cams)
 
     # Zero out the gradients
@@ -176,7 +174,7 @@ while True:
     patch_size = 150
     patch = (randint(0, viewpoint_cam.image_width - patch_size), randint(0, viewpoint_cam.image_height - patch_size), patch_size, patch_size)
-    image, image_stack, l2 = render_viewpoint(viewpoint_cam, pcloud_cams, patch, args.ewa)
+    image, image_stack, l2 = render_viewpoint(viewpoint_cam, pcloud_cams, patch)
     gt_image = viewpoint_cam.getImageHarmonized().to(device)
     gt_image = crop_image(gt_image, patch)
@@ -218,9 +216,9 @@ while True:
     for test_viewpoint in scene.getAllTestCameras():
         torch.cuda.empty_cache()
-        test_pcloud_cams = scene.getPCloudCamsForScore(test_viewpoint, args.sample_views)
+        test_pcloud_cams = scene.getPCloudCamsForScore(test_viewpoint, sample=False)
         random.shuffle(test_pcloud_cams)
-        image, image_stack, _ = render_viewpoint(test_viewpoint, test_pcloud_cams, None, args.ewa)
+        image, image_stack, _ = render_viewpoint(test_viewpoint, test_pcloud_cams, None)
         gt_image = test_viewpoint.getImageHarmonized().to("cuda")
         Ll1 = l1_loss(image, gt_image)
@@ -253,9 +251,9 @@ while True:
     for train_viewpoint in validation_cameras:
         torch.cuda.empty_cache()
-        train_pcloud_cams = scene.getPCloudCamsForScore(train_viewpoint, args.sample_views)
+        train_pcloud_cams = scene.getPCloudCamsForScore(train_viewpoint, sample=False)
         random.shuffle(train_pcloud_cams)
-        image, image_stack, _ = render_viewpoint(train_viewpoint, train_pcloud_cams, None, args.ewa)
+        image, image_stack, _ = render_viewpoint(train_viewpoint, train_pcloud_cams, None)
         gt_image = train_viewpoint.getImageHarmonized().to("cuda")
         Ll1 = l1_loss(image, gt_image)
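The training loop above crops supervision to a random 150x150 patch: it draws a patch origin so the patch fits inside the image, renders only that patch, and crops the ground truth to match. A hedged sketch of that patch handling follows; crop_image is called but not defined in this diff, so the slicing below is an assumption about the (origin_x, origin_y, size_x, size_y) convention it implements, and sample_patch is a hypothetical helper.

from random import randint
import torch

def sample_patch(image_width, image_height, patch_size=150):
    # Draw a patch origin so the whole patch stays inside the image.
    return (randint(0, image_width - patch_size),
            randint(0, image_height - patch_size),
            patch_size, patch_size)

def crop_image(image, patch):
    # Assumed slicing for the (origin_x, origin_y, size_x, size_y) convention.
    origin_x, origin_y, size_x, size_y = patch
    return image[..., origin_y:origin_y + size_y, origin_x:origin_x + size_x]

# Usage mirroring the loop: render only a random crop, compare to cropped GT.
patch = sample_patch(1920, 1080)
gt_image = torch.rand(3, 1080, 1920)   # stand-in for getImageHarmonized()
gt_patch = crop_image(gt_image, patch)
assert gt_patch.shape == (3, 150, 150)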