Skip to content

Commit

Permalink
Merge pull request #26 from transientlunatic/tests-add-likelihood-range-tests
Browse files Browse the repository at this point in the history

Add tests to the CUDA implementation of the likelihood
  • Loading branch information
transientlunatic authored Aug 23, 2024
2 parents d17257d + 58ac10d commit 1601ad4
Show file tree
Hide file tree
Showing 2 changed files with 72 additions and 18 deletions.
33 changes: 18 additions & 15 deletions heron/likelihood.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,52 +322,55 @@ class TimeDomainLikelihoodModelUncertaintyPyTorch(TimeDomainLikelihoodPyTorch):
def __init__(self, data, psd, waveform=None, detector=None):
super().__init__(data, psd, waveform, detector)

def _normalisation(self, K, S):
def _normalisation(self, K, S, indices):
(ind_a, ind_b) = indices
norm = (
-1.5 * K.shape[0] * self.log(2 * self.pi)
- 0.5 * self.logdet(K)
+ 0.5 * self.logdet(self.C)
- 0.5 * self.logdet(self.solve(K, self.C) + self.eye(K.shape[0]))
+ 0.5 * self.logdet(self.C[ind_a[0]:ind_a[1],ind_a[0]:ind_a[1]])
- 0.5 * self.logdet(self.solve(K, self.C[ind_a[0]:ind_a[1],ind_a[0]:ind_a[1]]) + self.eye(K.shape[0]))
)
return norm

def _weighted_data(self):
def _weighted_data(self, indices):
"""Return the weighted data component"""
# TODO This can all be pre-computed
(ind_a, ind_b) = indices
if not hasattr(self, "weighted_data_CACHE"):
dw = self.weighted_data_CACHE = (
-0.5 * self.data.T @ self.solve(self.C, self.data)
)
else:
dw = self.weighted_data_CACHE
return dw
return dw#[ind_a[0]:ind_a[1]]

def _weighted_model(self, mu, K):
"""Return the inner product of the GPR mean"""
mu = torch.tensor(mu, device=self.device, dtype=torch.double)
return -0.5 * mu.T @ self.solve(K, mu)

def _weighted_cross(self, mu, K):
a = self.solve(self.C, self.data) + self.solve(K, mu)
b = self.inverse_C + self.inverse(K)
def _weighted_cross(self, mu, K, indices):
(ind_a, ind_b) = indices
a = self.solve(self.C[ind_a[0]:ind_a[1],ind_a[0]:ind_a[1]], self.data[ind_a[0]:ind_a[1]]) + self.solve(K, mu)
b = self.inverse_C[ind_a[0]:ind_a[1],ind_a[0]:ind_a[1]] + self.inverse(K)
return 0.5 * a.T @ self.solve(b, a)

def log_likelihood(self, waveform):
def log_likelihood(self, waveform, norm=True):
a, b = self.timeseries.determine_overlap(self, waveform)

waveform_d = torch.tensor(waveform.data, device=self.device, dtype=torch.double)
waveform_d = torch.tensor(waveform.data, device=self.device, dtype=torch.double)[b[0]:b[1]]
waveform_c = torch.tensor(
waveform.covariance, device=self.device, dtype=torch.double
)
like = self._weighted_cross(waveform_d, waveform_c)
)[b[0]:b[1], b[0]:b[1]]
like = self._weighted_cross(waveform_d, waveform_c, indices=(a,b))
# print("cross term", like)
A = self._weighted_data()
A = self._weighted_data(indices=(a,b))
# print("data term", A)
B = self._weighted_model(waveform_d, waveform_c)
# print("model term", B)
like = like + A + B
norm = self._normalisation(waveform_c, self.C)
normalisation = self._normalisation(waveform_c, self.C, indices=(a,b))
# print("normalisation", norm)
like += norm
like += normalisation if norm else 0

return like
57 changes: 54 additions & 3 deletions tests/test_inference_uncertain.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,15 @@
from heron.models.lalsimulation import SEOBNRv3, IMRPhenomPv2, IMRPhenomPv2_FakeUncertainty
from heron.models.lalnoise import AdvancedLIGO
from heron.injection import make_injection, make_injection_zero_noise
from heron.detector import Detector, AdvancedLIGOHanford, AdvancedLIGOLivingston, AdvancedVirgo
from heron.likelihood import MultiDetector, TimeDomainLikelihood, TimeDomainLikelihoodModelUncertainty, TimeDomainLikelihoodPyTorch
#, TimeDomainLikelihoodModelUncertaintyPyTorch
from heron.detector import (Detector,
AdvancedLIGOHanford,
AdvancedLIGOLivingston,
AdvancedVirgo)
from heron.likelihood import (MultiDetector,
TimeDomainLikelihood,
TimeDomainLikelihoodModelUncertainty,
TimeDomainLikelihoodPyTorch,
TimeDomainLikelihoodModelUncertaintyPyTorch)

from heron.inference import heron_inference, parse_dict, load_yaml

Expand Down Expand Up @@ -61,3 +67,48 @@ def test_likelihood_maximum_at_true_value_mass_ratio(self):
log_likes.append(likelihood.log_likelihood(projected_waveform, norm=False))

self.assertTrue(np.abs(mass_ratios[np.argmax(log_likes)] - 0.6) < 0.1)

@unittest.skipIf(CUDA_NOT_AVAILABLE, "CUDA is not installed on this system")
class Test_Likelihood_ZeroNoise_With_Uncertainty_PyTorch(unittest.TestCase):
"""
Test likelihoods on a zero noise injection.
"""

def setUp(self):
self.waveform = IMRPhenomPv2_FakeUncertainty()
self.psd_model = AdvancedLIGO()

self.injections = make_injection_zero_noise(waveform=IMRPhenomPv2,
injection_parameters={"distance": 1000*u.megaparsec,
"mass_ratio": 0.6,
"gpstime": 0,
"total_mass": 60 * u.solMass},
detectors={"AdvancedLIGOHanford": "AdvancedLIGO",
"AdvancedLIGOLivingston": "AdvancedLIGO"}
)



def test_likelihood_maximum_at_true_value_mass_ratio(self):

data = self.injections['H1']

likelihood = TimeDomainLikelihoodModelUncertaintyPyTorch(data, psd=self.psd_model)
mass_ratios = np.linspace(0.1, 1.0, 100)

log_likes = []
for mass_ratio in mass_ratios:

test_waveform = self.waveform.time_domain(parameters={"distance": 1000*u.megaparsec,
"mass_ratio": mass_ratio,
"gpstime": 0,
"total_mass": 60 * u.solMass}, times=likelihood.times)
projected_waveform = test_waveform.project(AdvancedLIGOHanford(),
ra=0, dec=0,
gpstime=0,
phi_0=0, psi=0,
iota=0)

log_likes.append(likelihood.log_likelihood(projected_waveform, norm=False).cpu().numpy())

self.assertTrue(np.abs(mass_ratios[np.argmax(log_likes)] - 0.6) < 0.1)

0 comments on commit 1601ad4

Please sign in to comment.