Commit b5be88d (parent: e4a653e)

downsample output
Files changed:
- __pycache__/fusion.cpython-37.pyc (deleted)
- app.py (+7 -6)
- helpers.py (+1 -1)
- images/08/000295.jpg (added)
- images/08/001385.jpg (added)
- monoscene/__pycache__/CRP3D.cpython-37.pyc (deleted)
- monoscene/__pycache__/DDR.cpython-37.pyc (deleted)
- monoscene/__pycache__/__init__.cpython-37.pyc (deleted)
- monoscene/__pycache__/config.cpython-37.pyc (deleted)
- monoscene/__pycache__/flosp.cpython-37.pyc (deleted)
- monoscene/__pycache__/modules.cpython-37.pyc (deleted)
- monoscene/__pycache__/monoscene.cpython-37.pyc (deleted)
- monoscene/__pycache__/monoscene_model.cpython-37.pyc (deleted)
- monoscene/__pycache__/unet2d.cpython-37.pyc (deleted)
- monoscene/__pycache__/unet3d_kitti.cpython-37.pyc (deleted)
- monoscene/__pycache__/unet3d_nyu.cpython-37.pyc (deleted)
- monoscene/monoscene.py (+4 -4)
__pycache__/fusion.cpython-37.pyc · DELETED (binary file, 14.9 kB)
app.py · CHANGED

@@ -46,18 +46,17 @@ def predict(img):
 
     pred = model(batch).squeeze()
     # print(pred.shape)
-
-    fig = draw(pred, batch['
+    pred = majority_pooling(pred, k_size=2)
+    fig = draw(pred, batch['fov_mask_2'])
 
 
     return fig
 
-# The output is <b>downsampled by 2</b> to be able to be rendered in browsers.
 
 description = """
 MonoScene Demo on SemanticKITTI Validation Set (Sequence 08), which uses the <b>camera parameters of Sequence 08</b>.
 Due to the <b>CPU-only</b> inference, it might take up to 20s to predict a scene. \n
-<b>Darker</b> colors represent the <b>scenery outside the Field of View</b>, i.e. not visible on the image.
+The output is <b>downsampled by 2</b> for faster rendering. <b>Darker</b> colors represent the <b>scenery outside the Field of View</b>, i.e. not visible on the image.
 <center>
 <a href="https://cv-rits.github.io/MonoScene/">
 <img style="display:inline" alt="Project page" src="https://img.shields.io/badge/Project%20Page-MonoScene-red">

@@ -74,7 +73,9 @@ article="""
 """
 
 examples = [
-    'images/08/
+    'images/08/001385.jpg',
+    'images/08/000295.jpg',
+    'images/08/002505.jpg',
     'images/08/000085.jpg',
     'images/08/000290.jpg',
     'images/08/000465.jpg',

@@ -83,10 +84,10 @@ examples = [
     'images/08/001380.jpg',
     'images/08/001530.jpg',
     'images/08/002360.jpg',
-    'images/08/002505.jpg',
     'images/08/004059.jpg',
     'images/08/003149.jpg',
     'images/08/001446.jpg',
+    'images/08/000010.jpg',
     'images/08/001122.jpg',
     'images/08/003533.jpg',
     'images/08/003365.jpg',
helpers.py · CHANGED

@@ -188,7 +188,7 @@ def draw(
     fov_mask,
     # img_size,
     # f,
-    voxel_size=0.
+    voxel_size=0.4,
     # d=7, # 7m - determine the size of the mesh representing the camera
 ):
 
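Doubling the default voxel_size is the geometric counterpart of the 2x pooling above: halving each grid dimension while doubling each voxel's edge length leaves the rendered scene extent unchanged. A quick consistency check, assuming SemanticKITTI's usual 256x256x32 scene-completion grid at 0.2 m resolution:

    # Assumed values: SemanticKITTI scene-completion grid and resolution.
    native_voxel = 0.2            # metres per voxel before pooling
    grid = (256, 256, 32)         # native (X, Y, Z) grid
    k = 2                         # majority_pooling k_size
    down_grid = tuple(d // k for d in grid)   # (128, 128, 16)
    down_voxel = native_voxel * k             # 0.4, the new draw() default
    assert all(g * native_voxel == d * down_voxel
               for g, d in zip(grid, down_grid))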
images/08/000295.jpg · ADDED
images/08/001385.jpg · ADDED

monoscene/__pycache__/CRP3D.cpython-37.pyc · DELETED (binary file, 2.34 kB)
monoscene/__pycache__/DDR.cpython-37.pyc · DELETED (binary file, 3.07 kB)
monoscene/__pycache__/__init__.cpython-37.pyc · DELETED (binary file, 144 Bytes)
monoscene/__pycache__/config.cpython-37.pyc · DELETED (binary file, 1.19 kB)
monoscene/__pycache__/flosp.cpython-37.pyc · DELETED (binary file, 1.26 kB)
monoscene/__pycache__/modules.cpython-37.pyc · DELETED (binary file, 6.39 kB)
monoscene/__pycache__/monoscene.cpython-37.pyc · DELETED (binary file, 2.48 kB)
monoscene/__pycache__/monoscene_model.cpython-37.pyc · DELETED (binary file, 953 Bytes)
monoscene/__pycache__/unet2d.cpython-37.pyc · DELETED (binary file, 5.36 kB)
monoscene/__pycache__/unet3d_kitti.cpython-37.pyc · DELETED (binary file, 2.01 kB)
monoscene/__pycache__/unet3d_nyu.cpython-37.pyc · DELETED (binary file, 2.14 kB)
monoscene/monoscene.py · CHANGED

@@ -96,15 +96,15 @@ class MonoScene(pl.LightningModule):
             if x3d is None:
                 x3d = self.projects[str(scale_2d)](
                     x_rgb["1_" + str(scale_2d)][i],
-                    torch.div(projected_pix, scale_2d, rounding_mode='floor'),
-
+                    # torch.div(projected_pix, scale_2d, rounding_mode='floor'),
+                    projected_pix // scale_2d,
                     fov_mask,
                 )
             else:
                 x3d += self.projects[str(scale_2d)](
                     x_rgb["1_" + str(scale_2d)][i],
-                    torch.div(projected_pix, scale_2d, rounding_mode='floor'),
-
+                    # torch.div(projected_pix, scale_2d, rounding_mode='floor'),
+                    projected_pix // scale_2d,
                     fov_mask,
                 )
             x3ds.append(x3d)