Update to the latest packages + work with Python 3.10 #61

Open
wants to merge 4 commits into base: master
Changes from all commits
4 changes: 0 additions & 4 deletions README.md
@@ -31,10 +31,6 @@ You can create an anaconda environment called `conv_onet` using
conda env create -f environment.yaml
conda activate conv_onet
```
**Note**: you might need to install **torch-scatter** mannually following [the official instruction](https://github.com/rusty1s/pytorch_scatter#pytorch-140):
```
pip install torch-scatter==2.0.4 -f https://pytorch-geometric.com/whl/torch-1.4.0+cu101.html
```

Next, compile the extension modules.
You can do this via
45 changes: 21 additions & 24 deletions environment.yaml
@@ -4,27 +4,24 @@ channels:
- pytorch
- defaults
dependencies:
- cython=0.29.2
- imageio=2.4.1
- numpy=1.15.4
- numpy-base=1.15.4
- matplotlib=3.0.3
- matplotlib-base=3.0.3
- pandas=0.23.4
- pillow=5.3.0
- pyembree=0.1.4
- pytest=4.0.2
- python=3.6.7
- pytorch=1.4.0
- pyyaml=3.13
- scikit-image=0.14.1
- scipy=1.1.0
- tensorboardx=1.4
- torchvision=0.2.1
- tqdm=4.28.1
- trimesh=2.37.7
- pip:
- h5py==2.9.0
- plyfile==0.7
- torch_scatter==2.0.4

- cython=0.29.34
- h5py=3.8.0
- imageio=2.27.0
- numpy=1.24.2
- matplotlib=3.7.1
- pandas=2.0.0
- pillow=9.4.0
- plyfile=0.8.1
- pyembree=0.1.6
- pykdtree=1.3.7.post0
- pytest=7.2.2
- python=3.10
- pytorch=2.0.0
- pytorch-scatter=2.1.1
- pyyaml=6.0
- scikit-image=0.20.0
- scipy=1.10.1
- tensorboardx=2.5.1
- torchvision=0.15.0
- tqdm=4.65.0
- trimesh=3.21.4
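
With the manual `torch-scatter` pip step dropped from the README above, `torch_scatter` is now expected to come from the conda package `pytorch-scatter=2.1.1`. A minimal sanity-check sketch after creating the environment (the solver may resolve slightly different patch builds than the pins shown here):

```python
# Run inside the activated `conv_onet` environment to confirm the updated
# pins resolved and that torch_scatter imports cleanly.
import torch
import torch_scatter  # provided by the conda package `pytorch-scatter`
import numpy as np
import pandas as pd

print("torch", torch.__version__)                  # expected ~2.0.0
print("torch_scatter", torch_scatter.__version__)  # expected ~2.1.1
print("numpy", np.__version__)                     # expected ~1.24
print("pandas", pd.__version__)                    # expected ~2.0
```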
8 changes: 4 additions & 4 deletions generate.py
@@ -86,7 +86,7 @@
model_dict = dataset.get_model_dict(idx)
except AttributeError:
model_dict = {'model': str(idx), 'category': 'n/a'}

modelname = model_dict['model']
category_id = model_dict.get('category', 'n/a')

@@ -118,7 +118,7 @@

if not os.path.exists(in_dir):
os.makedirs(in_dir)

# Timing dict
time_dict = {
'idx': idx,
@@ -134,7 +134,7 @@
# Also copy ground truth
if cfg['generation']['copy_groundtruth']:
modelpath = os.path.join(
dataset.dataset_folder, category_id, modelname,
dataset.dataset_folder, category_id, modelname,
cfg['data']['watertight_file'])
out_file_dict['gt'] = modelpath

@@ -207,7 +207,7 @@
time_df.to_pickle(out_time_file)

# Create pickle files with main statistics
time_df_class = time_df.groupby(by=['class name']).mean()
time_df_class = time_df.groupby(by=['class name']).mean(numeric_only=True)
time_df_class.to_pickle(out_time_file_class)

# Print results
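
The `numeric_only=True` argument is needed because pandas 2.0 no longer drops non-numeric columns when aggregating: calling `.mean()` on groups that still contain string columns raises a `TypeError`. A small illustration with made-up column names (not the exact timing frame used in `generate.py`):

```python
import pandas as pd

df = pd.DataFrame({
    'class name': ['chair', 'chair', 'table'],
    'modelname': ['m1', 'm2', 'm3'],      # non-numeric column
    'time (total)': [1.0, 2.0, 3.0],
})

# Older pandas dropped 'modelname' automatically (latterly with a warning);
# pandas 2.0 raises TypeError unless non-numeric columns are excluded.
time_per_class = df.groupby(by=['class name']).mean(numeric_only=True)
print(time_per_class)
```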
13 changes: 0 additions & 13 deletions setup.py
@@ -12,18 +12,6 @@
numpy_include_dir = numpy.get_include()

# Extensions
# pykdtree (kd tree)
pykdtree = Extension(
'src.utils.libkdtree.pykdtree.kdtree',
sources=[
'src/utils/libkdtree/pykdtree/kdtree.c',
'src/utils/libkdtree/pykdtree/_kdtree_core.c'
],
language='c',
extra_compile_args=['-std=c99', '-O3', '-fopenmp'],
extra_link_args=['-lgomp'],
include_dirs=[numpy_include_dir]
)

# mcubes (marching cubes algorithm)
mcubes_module = Extension(
@@ -76,7 +64,6 @@

# Gather all extension modules
ext_modules = [
pykdtree,
mcubes_module,
triangle_hash_module,
mise_module,
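
With the vendored `pykdtree` C extension removed from `setup.py`, nearest-neighbour lookups rely on the standalone `pykdtree=1.3.7.post0` package pinned in `environment.yaml` (see the import change in `src/common.py` below). A minimal sketch of that package's KD-tree API:

```python
import numpy as np
from pykdtree.kdtree import KDTree

# Build a KD-tree over a reference point cloud and query nearest neighbours.
# pykdtree expects contiguous float32 or float64 arrays of matching dtype.
data_pts = np.random.rand(1000, 3).astype(np.float32)
query_pts = np.random.rand(10, 3).astype(np.float32)

tree = KDTree(data_pts)
dist, idx = tree.query(query_pts, k=1)  # nearest-neighbour distances and indices
print(dist.shape, idx.shape)
```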
18 changes: 9 additions & 9 deletions src/common.py
@@ -1,6 +1,6 @@
# import multiprocessing
import torch
from src.utils.libkdtree import KDTree
from pykdtree.kdtree import KDTree
import numpy as np
import math

@@ -56,7 +56,7 @@ def chamfer_distance_naive(points1, points2):

Args:
points1 (numpy array): first point set
points2 (numpy array): second point set
points2 (numpy array): second point set
'''
assert(points1.size() == points2.size())
batch_size, T, _ = points1.size()
@@ -265,7 +265,7 @@ def normalize_3d_coordinate(p, padding=0.1):
p (tensor): point
padding (float): conventional padding paramter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
'''

p_nor = p / (1 + padding + 10e-4) # (-0.5, 0.5)
p_nor = p_nor + 0.5 # range (0, 1)
# f there are outliers out of the range
@@ -286,15 +286,15 @@ def normalize_coord(p, vol_range, plane='xz'):
p[:, 0] = (p[:, 0] - vol_range[0][0]) / (vol_range[1][0] - vol_range[0][0])
p[:, 1] = (p[:, 1] - vol_range[0][1]) / (vol_range[1][1] - vol_range[0][1])
p[:, 2] = (p[:, 2] - vol_range[0][2]) / (vol_range[1][2] - vol_range[0][2])

if plane == 'xz':
x = p[:, [0, 2]]
elif plane =='xy':
x = p[:, [0, 1]]
elif plane =='yz':
x = p[:, [1, 2]]
else:
x = p
x = p
return x

def coordinate2index(x, reso, coord_type='2d'):
@@ -326,7 +326,7 @@ def coord2index(p, vol_range, reso=None, plane='xz'):
'''
# normalize to [0, 1]
x = normalize_coord(p, vol_range, plane=plane)

if isinstance(x, np.ndarray):
x = np.floor(x * reso).astype(int)
else: #* pytorch tensor
@@ -338,7 +338,7 @@ def coord2index(p, vol_range, reso=None, plane='xz'):
elif x.shape[1] == 3:
index = x[:, 0] + reso * (x[:, 1] + reso * x[:, 2])
index[index > reso**3] = reso**3

return index[None]

def update_reso(reso, depth):
Expand All @@ -353,7 +353,7 @@ def update_reso(reso, depth):
for i in range(base):
if ((reso + i) / base).is_integer():
reso = reso + i
break
break
return reso

def decide_total_volume_range(query_vol_metric, recep_field, unit_size, unet_depth):
Expand All @@ -377,7 +377,7 @@ def decide_total_volume_range(query_vol_metric, recep_field, unit_size, unet_dep
# handle the case when resolution is too large
if reso > 10000:
reso = 1

return input_vol, query_vol, reso

def add_key(base, new, base_name, new_name, device=None):
20 changes: 12 additions & 8 deletions src/config.py
@@ -2,6 +2,10 @@
from torchvision import transforms
from src import data
from src import conv_onet
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader


method_dict = {
@@ -13,13 +17,13 @@
def load_config(path, default_path=None):
''' Loads config file.

Args:
Args:
path (str): path to config file
default_path (bool): whether to use default path
'''
# Load configuration from file itself
with open(path, 'r') as f:
cfg_special = yaml.load(f)
cfg_special = yaml.load(f, Loader=Loader)

# Check if we should inherit from a config
inherit_from = cfg_special.get('inherit_from')
@@ -30,7 +34,7 @@ def load_config(path, default_path=None):
cfg = load_config(inherit_from, default_path)
elif default_path is not None:
with open(default_path, 'r') as f:
cfg = yaml.load(f)
cfg = yaml.load(f, Loader=Loader)
else:
cfg = dict()

@@ -146,7 +150,7 @@ def get_dataset(mode, cfg, return_idx=False):
)
else:
raise ValueError('Invalid dataset "%s"' % cfg['data']['dataset'])

return dataset


@@ -184,13 +188,13 @@ def get_inputs_field(mode, cfg):
data.SubsamplePointcloud(cfg['data']['pointcloud_n']),
data.PointcloudNoise(cfg['data']['pointcloud_noise'])
])

inputs_field = data.PatchPointCloudField(
cfg['data']['pointcloud_file'],
cfg['data']['pointcloud_file'],
transform,
multi_files= cfg['data']['multi_files'],
)

elif input_type == 'voxels':
inputs_field = data.VoxelsField(
cfg['data']['voxels_file']
@@ -200,4 +204,4 @@
else:
raise ValueError(
'Invalid input type (%s)' % input_type)
return inputs_field
return inputs_field
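
The explicit `Loader` argument is required because PyYAML 6.0 (pinned in `environment.yaml`) made it mandatory for `yaml.load`; calling `yaml.load(f)` without it now raises a `TypeError`. `CLoader` is simply the faster libyaml-backed loader, with the pure-Python `Loader` as fallback. A standalone sketch of the same pattern (the config path is hypothetical):

```python
import yaml

try:
    from yaml import CLoader as Loader  # C-accelerated loader, if libyaml is available
except ImportError:
    from yaml import Loader             # pure-Python fallback

with open('configs/demo.yaml', 'r') as f:  # hypothetical path for illustration
    cfg = yaml.load(f, Loader=Loader)

# yaml.safe_load(f) is an alternative when the config contains only plain data.
print(type(cfg))
```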
35 changes: 20 additions & 15 deletions src/data/core.py
@@ -3,6 +3,11 @@
from torch.utils import data
import numpy as np
import yaml
try:
from yaml import CLoader as Loader
except:
from yaml import Loader

from src.common import decide_total_volume_range, update_reso


@@ -68,12 +73,12 @@ def __init__(self, dataset_folder, fields, split=None,

if os.path.exists(metadata_file):
with open(metadata_file, 'r') as f:
self.metadata = yaml.load(f)
self.metadata = yaml.load(f, Loader=Loader)
else:
self.metadata = {
c: {'id': c, 'name': 'n/a'} for c in categories
}
}

# Set index
for c_idx, c in enumerate(categories):
self.metadata[c]['idx'] = c_idx
@@ -94,17 +99,17 @@ def __init__(self, dataset_folder, fields, split=None,
split_file = os.path.join(subpath, split + '.lst')
with open(split_file, 'r') as f:
models_c = f.read().split('\n')

if '' in models_c:
models_c.remove('')

self.models += [
{'category': c, 'model': m}
for m in models_c
]

# precompute
if self.cfg['data']['input_type'] == 'pointcloud_crop':
if self.cfg['data']['input_type'] == 'pointcloud_crop':
self.split = split
# proper resolution for feature plane/volume of the ENTIRE scene
query_vol_metric = self.cfg['data']['padding'] + 1
@@ -114,7 +119,7 @@
depth = cfg['model']['encoder_kwargs']['unet_kwargs']['depth']
elif 'unet3d' in cfg['model']['encoder_kwargs']:
depth = cfg['model']['encoder_kwargs']['unet3d_kwargs']['num_levels']

self.depth = depth
#! for sliding-window case, pass all points!
if self.cfg['generation']['sliding_window']:
@@ -124,7 +129,7 @@
self.total_input_vol, self.total_query_vol, self.total_reso = \
decide_total_volume_range(query_vol_metric, recep_field, unit_size, depth)


def __len__(self):
''' Returns the length of the dataset.
'''
@@ -148,7 +153,7 @@ def __getitem__(self, idx):
data['pointcloud_crop'] = True
else:
info = c_idx

for field_name, field in self.fields.items():
try:
field_data = field.load(model_path, idx, info)
@@ -175,7 +180,7 @@
data = self.transform(data)

return data

def get_vol_info(self, model_path):
''' Get crop information

@@ -193,15 +198,15 @@ def get_vol_info(self, model_path):
else:
num = np.random.randint(self.cfg['data']['multi_files'])
file_path = os.path.join(model_path, field_name, '%s_%02d.npz' % (field_name, num))

points_dict = np.load(file_path)
p = points_dict['points']
if self.split == 'train':
# randomly sample a point as the center of input/query volume
p_c = [np.random.uniform(p[:,i].min(), p[:,i].max()) for i in range(3)]
# p_c = [np.random.uniform(-0.55, 0.55) for i in range(3)]
p_c = np.array(p_c).astype(np.float32)

reso = query_vol_size + recep_field - 1
# make sure the defined reso can be properly processed by UNet
reso = update_reso(reso, self.depth)
@@ -224,7 +229,7 @@
'input_vol' : input_vol,
'query_vol' : query_vol}
return vol_info

def get_model_dict(self, idx):
return self.models[idx]

@@ -261,9 +266,9 @@ def worker_init_fn(worker_id):
''' Worker init function to ensure true randomness.
'''
def set_num_threads(nt):
try:
try:
import mkl; mkl.set_num_threads(nt)
except:
except:
pass
torch.set_num_threads(1)
os.environ['IPC_ENABLE']='1'