
Commit

Fix typos
cpfy committed Aug 5, 2022
1 parent 9190a19 commit 810e1ed
Showing 9 changed files with 45 additions and 45 deletions.
8 changes: 4 additions & 4 deletions datasets/phototourism.py
@@ -18,7 +18,7 @@
from kornia import create_meshgrid
import h5py

-# additinal configuarion
+# additional configuration
sfm_path = "sparse"
vis_octree = False
vis_intersection = False
@@ -274,7 +274,7 @@ def near_far_voxel(self, octree_data, rays_o, rays_d, image_name, chunk_size=655
voxel_near_sfm_all = []
voxel_far_sfm_all = []

-# todo: figure out why chunck size greater or equal than 1768500 will result in error intersection
+# todo: figure out why chunk size greater or equal than 1768500 will result in error intersection
# use 1000000 as a threshold just to be safe
chunk_size = min(rays_o.size()[0], 100000)
try:
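The capped chunk_size above is the workaround for the kaolin issue noted in the todo: intersections are queried in bounded batches and concatenated. A minimal sketch of that pattern (the per-chunk helper name and signature are assumed for illustration, not the upstream code):

    # Query ray/voxel intersections in chunks small enough to avoid the
    # kaolin issue mentioned in the todo above, then stitch results back.
    n_rays = rays_o.size()[0]
    chunk_size = min(n_rays, 100000)
    voxel_near_chunks, voxel_far_chunks = [], []
    for i in range(0, n_rays, chunk_size):
        # hypothetical per-chunk intersection call, shown for shape only
        near_i, far_i = intersect_octree(rays_o[i:i + chunk_size],
                                         rays_d[i:i + chunk_size], octree_data)
        voxel_near_chunks.append(near_i)
        voxel_far_chunks.append(far_i)
    voxel_near = torch.cat(voxel_near_chunks, dim=0)
    voxel_far = torch.cat(voxel_far_chunks, dim=0)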
@@ -663,9 +663,9 @@ def read_meta(self):

valid_num = torch.sum(valid_depth).long().item()
current_len = rays.size()[0]
-curent_percent = valid_num / current_len
+current_percent = valid_num / current_len
padding_length = int(np.ceil((self.depth_percent * current_len - valid_num) / (1 - self.depth_percent)))
-print(f"padding valid depth percentage: from {curent_percent} to {self.depth_percent} with padding {padding_length}")
+print(f"padding valid depth percentage: from {current_percent} to {self.depth_percent} with padding {padding_length}")

pad_ind = torch.floor((torch.rand(padding_length) * valid_num)).long()
result_length = padding_length + current_len
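A note on the padding arithmetic above: to raise the valid-depth fraction from current_percent to the target p = self.depth_percent by duplicating k valid rays, solve (valid_num + k) / (current_len + k) = p, which gives k = (p * current_len - valid_num) / (1 - p) — exactly padding_length. A quick standalone check with made-up numbers:

    import numpy as np
    # n = 1000 rays, v = 100 valid (10%), target p = 0.5
    n, v, p = 1000, 100, 0.5
    k = int(np.ceil((p * n - v) / (1 - p)))   # -> 800
    assert abs((v + k) / (n + k) - p) < 1e-9  # (100+800)/(1000+800) = 0.5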
6 changes: 3 additions & 3 deletions lightning_modules/neuconw_system.py
@@ -106,7 +106,7 @@ def __init__(self, hparams, config, caches):

spc_options = {
"voxel_size": self.scene_config["voxel_size"],
"recontruct_path": self.config.DATASET.ROOT_DIR,
"reconstruct_path": self.config.DATASET.ROOT_DIR,
"min_track_length": self.scene_config["min_track_length"],
}
self.renderer = NeuconWRenderer(
@@ -272,14 +272,14 @@ def octree_update(

del self.renderer.fine_octree_data

-# get suface points
+# get surface points
sparse_pc_sfm, train_voxel_size = self.surface_selection(
train_level, threshold, device, chunk
)

# use remaining points to generate new octree
octree_new, scene_origin, scale, level = gen_octree(
-self.renderer.recontruct_path,
+self.renderer.reconstruct_path,
sparse_pc_sfm,
train_voxel_size,
device=device,
12 changes: 6 additions & 6 deletions rendering/renderer.py
@@ -94,14 +94,14 @@ def __init__(
self.sample_range = sample_range
self.fine_octree_data = None
if self.nerf_far_override:
-self.recontruct_path = spc_options["recontruct_path"]
+self.reconstruct_path = spc_options["reconstruct_path"]
self.min_track_length = spc_options["min_track_length"]
self.voxel_size = spc_options["voxel_size"]

self.sfm_to_gt = np.eye(4)

# read unit sphere origin and radius from scene config
-scene_config_path = os.path.join(spc_options["recontruct_path"], "config.yaml")
+scene_config_path = os.path.join(spc_options["reconstruct_path"], "config.yaml")
if os.path.isfile(scene_config_path):
with open(scene_config_path, "r") as yamlfile:
scene_config = yaml.load(yamlfile, Loader=yaml.FullLoader)
@@ -136,7 +136,7 @@ def __init__(

def get_octree(self, device):
octree, scene_origin, scale, level = gen_octree_from_sfm(
-self.recontruct_path, self.min_track_length, self.voxel_size, device=device
+self.reconstruct_path, self.min_track_length, self.voxel_size, device=device
)

octree_data = {}
@@ -385,7 +385,7 @@ def get_near_far_octree(self, octree_data, rays_o, rays_d, near, far):
octree_level = octree_data["level"]
spc_data = octree_data["spc_data"]

-# transfrom origins and direction of rays to sfm coordinate system
+# transform origins and direction of rays to sfm coordinate system
rays_o_sfm = (rays_o * self.radius).view(-1, 3) + self.origin

# generate near far from spc
Expand Down Expand Up @@ -423,7 +423,7 @@ def get_near_far_sdf(self, octree_data, rays_o, rays_d, near, far):
train_voxel_size = octree_data["voxel_size"]
train_spc_data = octree_data["spc_data"]

-# transfrom origins and direction of rays to sfm coordinate system
+# transform origins and direction of rays to sfm coordinate system
rays_o_sfm = (rays_o * self.radius).view(-1, 3) + self.origin

# generate near far from spc
@@ -456,7 +456,7 @@ def get_near_far_sdf(self, octree_data, rays_o, rays_d, near, far):
return voxel_near, voxel_far, ~miss_mask

def sparse_sampler(self, rays_o, rays_d, near, far, perturb):
"""sample on spaese voxel. Including upsample on sparse voxels,
"""sample on sparse voxel. Including upsample on sparse voxels,
and uniform sample on inverse depth of original near far,
Note that input coordinates are scaled to unit sphere
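The "uniform sample on inverse depth" mentioned in this docstring is the usual disparity-space scheme: step uniformly in 1/t between 1/near and 1/far so samples concentrate close to the camera. A minimal sketch under assumed tensor shapes (not the renderer's exact code):

    # N uniform steps in inverse depth between near and far (N assumed).
    t = torch.linspace(0, 1, steps=N, device=near.device)
    z_vals = 1.0 / (1.0 / near * (1.0 - t) + 1.0 / far * t)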
4 changes: 2 additions & 2 deletions tools/extract_mesh.py
@@ -29,13 +29,13 @@ def get_opts():
choices=['blender', 'phototourism'],
help='which dataset to validate')
parser.add_argument('--eval_level', type=int, default=-1,
-help='level og precision')
+help='level of precision')
parser.add_argument('--mesh_size', type=int, default=128,
help='resolution of mesh, (N, N, N)')
parser.add_argument('--mesh_origin', type=str, default="0, 0, 0",
help='origin of mesh, (x, y, z)')
parser.add_argument('--mesh_radius', type=float, default=1.0,
-help='radius pf mesh')
+help='radius of mesh')
parser.add_argument('--vertex_color', default=False, action="store_true",
help='whether add color to mesh')
parser.add_argument('--num_gpus', type=int, default=1,
32 changes: 16 additions & 16 deletions tools/prepare_data/generate_voxel.py
@@ -39,7 +39,7 @@ def expand_points(points, voxel_size):


def gen_octree_from_sfm(
-recontruct_path,
+reconstruct_path,
min_track_length,
voxel_size,
sfm_path="sparse",
@@ -49,7 +49,7 @@
radius=1.0,
):
# read 3d points from sfm result, and filter them
-point_path = os.path.join(recontruct_path, f"dense/{sfm_path}/points3D.bin")
+point_path = os.path.join(reconstruct_path, f"dense/{sfm_path}/points3D.bin")
points_3d = read_points3d_binary(point_path)
points_ori = []
for id, p in points_3d.items():
@@ -68,12 +68,12 @@
o3d.io.write_point_cloud(f"samples/voxel_vis_source.ply", gt_pcd)

return gen_octree(
-recontruct_path, points, voxel_size, device, visualize, expand, radius
+reconstruct_path, points, voxel_size, device, visualize, expand, radius
)
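For context, min_track_length keeps only SfM points observed by enough images. With COLMAP's standard read_write_model records (Point3D carries .xyz and .image_ids), the filter loop in gen_octree_from_sfm amounts to roughly this sketch (variable names assumed from the diff context):

    # Keep only well-observed SfM points.
    points_ori = [p.xyz for p in points_3d.values()
                  if len(p.image_ids) >= min_track_length]
    points = np.stack(points_ori, axis=0)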


def gen_octree(
-recontruct_path,
+reconstruct_path,
points,
voxel_size,
device=0,
@@ -82,7 +82,7 @@
radius=1.0,
in_sfm=True,
):
-scene_config_path = os.path.join(recontruct_path, "config.yaml")
+scene_config_path = os.path.join(reconstruct_path, "config.yaml")
# read scene config
with open(scene_config_path, "r") as yamlfile:
scene_config = yaml.load(yamlfile, Loader=yaml.FullLoader)
@@ -107,7 +107,7 @@
# dimensions
dim = np.max(bbx_max - bbx_min)

-# points dialation
+# points dilation
for _ in range(expand):
points = expand_points(points, voxel_size)
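The dilation loop above grows the point set by one voxel per iteration. One common way to implement such an expand_points (the repository's version may differ) is to offset every point by ±voxel_size along each axis and keep the union:

    import numpy as np

    def expand_points_sketch(points, voxel_size):
        # 27 offsets: every combination of -1/0/+1 per axis, incl. identity.
        offsets = np.array([[dx, dy, dz]
                            for dx in (-1, 0, 1)
                            for dy in (-1, 0, 1)
                            for dz in (-1, 0, 1)], dtype=float) * voxel_size
        return (points[:, None, :] + offsets[None, :, :]).reshape(-1, 3)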

@@ -192,7 +192,7 @@ def level_upgrade(
octree_scale,
src_level,
target_level,
-recontruct_path,
+reconstruct_path,
visualize=False,
):
# upsample octree
@@ -231,7 +231,7 @@ def level_upgrade(
xyz_sfm = sparse_ind_up * target_voxel_size + vol_origin

return gen_octree(
-recontruct_path,
+reconstruct_path,
xyz_sfm.cpu().numpy(),
target_voxel_size,
device=device,
@@ -246,7 +246,7 @@ def level_downgrade(
octree_scale,
src_level,
target_level,
-recontruct_path,
+reconstruct_path,
visualize=False,
):
device = octree.device
@@ -266,7 +266,7 @@ def level_downgrade(
target_voxel_size = 2 / (2**target_level) * octree_scale

return gen_octree(
-recontruct_path,
+reconstruct_path,
xyz_sfm,
target_voxel_size,
device=device,
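The level arithmetic behind level_upgrade/level_downgrade follows the target_voxel_size = 2 / (2**target_level) * octree_scale line above: the normalized volume spans [-1, 1] (a side of 2), so each extra level halves the voxel. For example, with octree_scale = 1.0:

    # level 7 -> 2 / 128 = 0.015625      (downgrade: voxels double in size)
    # level 8 -> 2 / 256 = 0.0078125
    # level 9 -> 2 / 512 = 0.00390625    (upgrade: voxels halve in size)
    target_voxel_size = 2 / (2 ** target_level) * octree_scale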
@@ -281,7 +281,7 @@ def octree_level_adjust(
octree_scale,
src_level,
target_level,
-recontruct_path,
+reconstruct_path,
visualize,
):
if target_level > src_level:
@@ -291,7 +291,7 @@ def octree_level_adjust(
octree_scale,
src_level,
target_level,
-recontruct_path,
+reconstruct_path,
visualize,
)
elif target_level < src_level:
@@ -301,7 +301,7 @@ def octree_level_adjust(
octree_scale,
src_level,
target_level,
-recontruct_path,
+reconstruct_path,
visualize,
)
else:
@@ -328,7 +328,7 @@ def get_near_far(
'with_exit': set true to obtain accurate far. Default to false as this will perform aabb twice
"""
-# Avoid corner cases. issuse in kaolin: https://github.com/NVIDIAGameWorks/kaolin/issues/490
+# Avoid corner cases. issues in kaolin: https://github.com/NVIDIAGameWorks/kaolin/issues/490
rays_d = rays_d.clone() + 1e-7
rays_o = rays_o.clone() + 1e-7
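The 1e-7 nudge above sidesteps the linked kaolin corner case: in a slab-style ray/AABB test, a direction component that is exactly zero turns into inf/nan after the divide. A self-contained sketch of that test (illustrative, not kaolin's implementation):

    import torch

    def ray_aabb_slab(rays_o, rays_d, box_min, box_max):
        inv_d = 1.0 / rays_d                       # inf where rays_d == 0
        t0 = (box_min - rays_o) * inv_d
        t1 = (box_max - rays_o) * inv_d
        t_near = torch.minimum(t0, t1).amax(dim=-1)
        t_far = torch.maximum(t0, t1).amin(dim=-1)
        hit = t_far >= torch.clamp(t_near, min=0.0)
        return t_near, t_far, hit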

@@ -446,10 +446,10 @@ def get_near_far(
from torch.utils.data import DataLoader

voxel_size = 0.1
-recontruct_path = "/nas/datasets/IMC/phototourism/training_set/brandenburg_gate"
+reconstruct_path = "/nas/datasets/IMC/phototourism/training_set/brandenburg_gate"
min_track_length = 50
octree, scene_origin, scale, level = gen_octree_from_sfm(
-recontruct_path, min_track_length, voxel_size, visualize=True
+reconstruct_path, min_track_length, voxel_size, visualize=True
)
# gen fake ray origin and direction
rays_o = (
16 changes: 8 additions & 8 deletions tools/reproj_error.py
@@ -140,13 +140,13 @@ def image_reproj_error(imdata, pts3d, img_ids, entrinsics_dict, intrinsics_dict)



-def gt_reproject_error(data_dir, gt_pcd_path, sfm_to_gt, reconstuct_path, track_length=200, reproj_error=0.4, batch_size=2, img_reproj_error=300):
+def gt_reproject_error(data_dir, gt_pcd_path, sfm_to_gt, reconstruct_path, track_length=200, reproj_error=0.4, batch_size=2, img_reproj_error=300):
# 0. read data from bin
-# reconstuct_path = 'dense_ws_filtered_tkl200_mrep.5'
-# reconstuct_path = 'dense/sparse'
-imdata = read_images_binary(os.path.join(data_dir, reconstuct_path, 'images.bin'))
-camdata = read_cameras_binary(os.path.join(data_dir, reconstuct_path, 'cameras.bin'))
-pts3d = read_points3d_binary(os.path.join(data_dir, reconstuct_path, 'points3D.bin'))
+# reconstruct_path = 'dense_ws_filtered_tkl200_mrep.5'
+# reconstruct_path = 'dense/sparse'
+imdata = read_images_binary(os.path.join(data_dir, reconstruct_path, 'images.bin'))
+camdata = read_cameras_binary(os.path.join(data_dir, reconstruct_path, 'cameras.bin'))
+pts3d = read_points3d_binary(os.path.join(data_dir, reconstruct_path, 'points3D.bin'))

gt_pcd = torch.from_numpy(np.array(o3d.io.read_point_cloud(gt_pcd_path).points)).float().cuda()
img_ids, img_id_to_name = get_image_id(imdata, data_dir)
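As background for the error thresholds in this file: the reprojection error of a 3D point is the pixel distance between its observed keypoint and its projection into the image. Under a pinhole model it is, schematically (inputs K, R, t, X, x_obs assumed, not the exact code here):

    X_cam = R @ X + t                       # world -> camera
    x_proj = K @ X_cam
    x_proj = x_proj[:2] / x_proj[2]         # perspective divide to pixels
    reproj_err = np.linalg.norm(x_proj - x_obs)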
@@ -254,7 +254,7 @@ def get_opts():
parser.add_argument('--gt_pcd_path', type=str,
default="/nas/datasets/OpenHeritage3D/pro/brandenburg_gate/bg_sampled_0.01_cropped.ply",
help='target point cloud')
-parser.add_argument('--reconstuct_path', type=str,
+parser.add_argument('--reconstruct_path', type=str,
default="dense/sparse",
help='reconstruction work space')
parser.add_argument('--track_length', type=int, default='200',
@@ -274,4 +274,4 @@ def get_opts():
# read scene config
with open(os.path.join(args.data_dir, 'config.yaml'), "r") as yamlfile:
scene_config = yaml.load(yamlfile, Loader=yaml.FullLoader)
-gt_reproject_error(args.data_dir, args.gt_pcd_path, np.array(scene_config['sfm2gt']), args.reconstuct_path, args.track_length, args.reproj_error, args.batch_size, args.img_reproj_error)
+gt_reproject_error(args.data_dir, args.gt_pcd_path, np.array(scene_config['sfm2gt']), args.reconstruct_path, args.track_length, args.reproj_error, args.batch_size, args.img_reproj_error)
2 changes: 1 addition & 1 deletion utils/kaolin_renderer.py
@@ -116,7 +116,7 @@ def __call__(self, height, width, intrinsic, pose):

depth_all = []
pid_all = []
-# todo: figure out why chunck size greater or equal than 1768500 will result in error intersection
+# todo: figure out why chunk size greater or equal than 1768500 will result in error intersection
# use 1000000 as a threshold just to be safe
chunk_size = min(rays_o.size()[0], 1000000)
for i in range(0, rays_o.size()[0], chunk_size):
6 changes: 3 additions & 3 deletions utils/reproj_filter.py
@@ -252,12 +252,12 @@ def split_list(_list, n):


if __name__ == "__main__":
-reconstuct_path = 'dense/sparse'
+reconstruct_path = 'dense/sparse'
print(f"result will be saved to {args.output_path}")
os.makedirs(args.output_path, exist_ok=True)

-imdata = read_images_binary(os.path.join(args.data_path, reconstuct_path, 'images.bin'))
-camdata = read_cameras_binary(os.path.join(args.data_path, reconstuct_path, 'cameras.bin'))
+imdata = read_images_binary(os.path.join(args.data_path, reconstruct_path, 'images.bin'))
+camdata = read_cameras_binary(os.path.join(args.data_path, reconstruct_path, 'cameras.bin'))
img_ids_all, img_id_to_name, img_path_to_id = get_image_id(imdata, args.data_path)
img_ids = get_train_ids(args.data_path, img_ids_all, img_path_to_id)
print(f"views to process: {len(img_ids)}")
4 changes: 2 additions & 2 deletions utils/vis_metrics.py
@@ -18,7 +18,7 @@ def get_opts():
return parser.parse_args()


-def svae_plot(ind, data1, data2, name1, name2, save_path, name):
+def save_plot(ind, data1, data2, name1, name2, save_path, name):
plt.plot(ind, np.array(data1) * 100, "-b", label=name1)
plt.plot(ind, np.array(data2) * 100, "-r", label=name2)
plt.legend(loc="upper left")
@@ -46,7 +46,7 @@ def vis_results(ours_path, colmap_path, save_name, max_num):
save_path = os.path.join("eval_results", f"{save_name}")
os.makedirs(save_path, exist_ok=True)
for key in ours_metrics.keys():
-svae_plot(thresholds, ours_metrics[key][:max_num], colmap_metrics[key][:max_num], \
+save_plot(thresholds, ours_metrics[key][:max_num], colmap_metrics[key][:max_num], \
"ours", "colmap", save_path, f"{key}")

if __name__ == "__main__":
