Skip to content

Commit

Permalink
Merge pull request #19 from pfxuan/fix-libtorch-2.2
Browse files Browse the repository at this point in the history
Support latest libtorch 2.2.x
  • Loading branch information
pierotofy authored Feb 24, 2024
2 parents 852f90b + 4787145 commit 82eae7a
Show file tree
Hide file tree
Showing 4 changed files with 36 additions and 8 deletions.
9 changes: 5 additions & 4 deletions .github/workflows/cuda/Linux.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@ case ${1} in
cu121)
CUDA=12.1
APT_KEY=${OS}-${CUDA/./-}-local
FILENAME=cuda-repo-${APT_KEY}_${CUDA}.0-530.30.02-1_amd64.deb
URL=https://developer.download.nvidia.com/compute/cuda/${CUDA}.0/local_installers
FILENAME=cuda-repo-${APT_KEY}_${CUDA}.1-530.30.02-1_amd64.deb
URL=https://developer.download.nvidia.com/compute/cuda/${CUDA}.1/local_installers
;;
cu118)
CUDA=11.8
Expand Down Expand Up @@ -64,7 +64,8 @@ else
sudo apt-key add /var/cuda-repo-${APT_KEY}/7fa2af80.pub
fi

sudo apt-get update
sudo apt-get -y install cuda
sudo apt-get -qq update
sudo apt install -y cuda-nvcc-${CUDA/./-} cuda-libraries-dev-${CUDA/./-} cuda-command-line-tools-${CUDA/./-}
sudo apt clean

rm -f ${FILENAME}
12 changes: 9 additions & 3 deletions .github/workflows/ubuntu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-22.04, ubuntu-20.04] # [ubuntu-22.04, ubuntu-20.04, ubuntu-18.04]
torch-version: [2.1.2] # [1.12.0, 1.13.0, 2.0.0, 2.1.0, 2.1.1, 2.1.2, 2.2.0]
torch-version: [2.1.2, 2.2.1] # [1.12.0, 1.13.0, 2.0.0, 2.1.0, 2.1.1, 2.1.2, 2.2.0, 2.2.1]
cuda-version: ['cu118', 'cu121'] # ['cpu', 'cu113', 'cu116', 'cu117']
cmake-build-type: [Release] # [Debug, ClangTidy]

Expand All @@ -36,12 +36,18 @@ jobs:
sudo apt-get update
sudo apt-get install -y \
build-essential \
ccache \
cmake \
ninja-build \
libopencv-dev \
wget
- name: Install ccache
run: |
wget -nv https://github.com/ccache/ccache/releases/download/v4.9.1/ccache-4.9.1-linux-x86_64.tar.xz
sudo tar xf ccache-4.9.1-linux-x86_64.tar.xz -C /usr/bin --strip-components=1 --no-same-owner ccache-4.9.1-linux-x86_64/ccache
rm -f ccache-*-linux-x86_64.tar.xz
ccache --version
- name: Install CUDA ${{ matrix.cuda-version }}
if: ${{ matrix.cuda-version != 'cpu' }}
run: |
Expand Down Expand Up @@ -83,7 +89,7 @@ jobs:
-DCMAKE_PREFIX_PATH=${{ github.workspace }}/libtorch \
-DCMAKE_INSTALL_PREFIX=${{github.workspace}}/install \
-DCUDA_TOOLKIT_ROOT_DIR=$CUDA_HOME
ninja -k 8
ninja
- name: Clean compiler cache
run: |
Expand Down
22 changes: 21 additions & 1 deletion model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,11 @@ int Model::getDownscaleFactor(int step){

void Model::addToOptimizer(torch::optim::Adam *optimizer, const torch::Tensor &newParam, const torch::Tensor &idcs, int nSamples){
torch::Tensor param = optimizer->param_groups()[0].params()[0];
#if TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR > 1
auto pId = param.unsafeGetTensorImpl();
#else
auto pId = c10::guts::to_string(param.unsafeGetTensorImpl());
#endif
auto paramState = std::make_unique<torch::optim::AdamParamState>(static_cast<torch::optim::AdamParamState&>(*optimizer->state()[pId]));

std::vector<int64_t> repeats;
Expand All @@ -208,21 +212,33 @@ void Model::addToOptimizer(torch::optim::Adam *optimizer, const torch::Tensor &n

optimizer->state().erase(pId);

#if TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR > 1
auto newPId = newParam.unsafeGetTensorImpl();
#else
auto newPId = c10::guts::to_string(newParam.unsafeGetTensorImpl());
#endif
optimizer->state()[newPId] = std::move(paramState);
optimizer->param_groups()[0].params()[0] = newParam;
}

// Shrink the Adam optimizer state to match a pruned parameter tensor:
// rows flagged in deletedMask are dropped from the exp_avg / exp_avg_sq
// moment buffers, and the state entry is re-keyed to the new parameter.
//
// optimizer:   Adam optimizer whose first param group's first param is
//              the tensor being replaced (that is all this code touches).
// newParam:    the replacement parameter (already pruned by the caller).
// deletedMask: boolean mask over the old parameter's first dimension;
//              true entries are removed (state keeps rows where ~mask).
void Model::removeFromOptimizer(torch::optim::Adam *optimizer, const torch::Tensor &newParam, const torch::Tensor &deletedMask){
    torch::Tensor param = optimizer->param_groups()[0].params()[0];
    // libtorch >= 2.2 keys optimizer state by the raw TensorImpl*; older
    // versions key by the stringified pointer via c10::guts::to_string,
    // which was removed in 2.2. The check must be "version >= 2.2": the
    // previous form (MAJOR == 2 && MINOR > 1) would silently select the
    // removed to_string API on any future major version (e.g. 3.0).
#if TORCH_VERSION_MAJOR > 2 || (TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR > 1)
    auto pId = param.unsafeGetTensorImpl();
#else
    auto pId = c10::guts::to_string(param.unsafeGetTensorImpl());
#endif
    // Copy the existing per-parameter Adam state so it can be edited and
    // re-inserted under the new key.
    auto paramState = std::make_unique<torch::optim::AdamParamState>(static_cast<torch::optim::AdamParamState&>(*optimizer->state()[pId]));

    // Keep only the moment rows that survive the prune.
    paramState->exp_avg(paramState->exp_avg().index({~deletedMask}));
    paramState->exp_avg_sq(paramState->exp_avg_sq().index({~deletedMask}));

    optimizer->state().erase(pId);
#if TORCH_VERSION_MAJOR > 2 || (TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR > 1)
    auto newPId = newParam.unsafeGetTensorImpl();
#else
    auto newPId = c10::guts::to_string(newParam.unsafeGetTensorImpl());
#endif
    optimizer->param_groups()[0].params()[0] = newParam;
    optimizer->state()[newPId] = std::move(paramState);
}
Expand Down Expand Up @@ -383,7 +399,11 @@ void Model::afterTrain(int step){

// Reset optimizer
torch::Tensor param = opacitiesOpt->param_groups()[0].params()[0];
auto pId = c10::guts::to_string(param.unsafeGetTensorImpl());
#if TORCH_VERSION_MAJOR == 2 && TORCH_VERSION_MINOR > 1
auto pId = param.unsafeGetTensorImpl();
#else
auto pId = c10::guts::to_string(param.unsafeGetTensorImpl());
#endif
auto paramState = std::make_unique<torch::optim::AdamParamState>(static_cast<torch::optim::AdamParamState&>(*opacitiesOpt->state()[pId]));
paramState->exp_avg(torch::zeros_like(paramState->exp_avg()));
paramState->exp_avg_sq(torch::zeros_like(paramState->exp_avg_sq()));
Expand Down
1 change: 1 addition & 0 deletions model.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

#include <iostream>
#include <torch/torch.h>
#include <torch/csrc/api/include/torch/version.h>
#include "nerfstudio.hpp"
#include "kdtree_tensor.hpp"
#include "spherical_harmonics.hpp"
Expand Down

0 comments on commit 82eae7a

Please sign in to comment.