
Commit d17257d

Merge pull request #25 from transientlunatic/tests-add-likelihood-range-tests

Update the numpy likelihoods
transientlunatic authored Aug 22, 2024
2 parents 5bf0cdc + 1389ebd commit d17257d
Showing 5 changed files with 97 additions and 19 deletions.
44 changes: 31 additions & 13 deletions heron/likelihood.py
@@ -126,6 +126,9 @@ class TimeDomainLikelihoodModelUncertainty(TimeDomainLikelihood):
     def __init__(self, data, psd, waveform=None, detector=None):
         super().__init__(data, psd, waveform, detector)
 
+        self.norm_factor_2 = np.max(self.C)
+        self.norm_factor = np.sqrt(self.norm_factor_2)
+
     def _normalisation(self, K, S):
         norm = (
             -1.5 * K.shape[0] * self.log(2 * self.pi)
@@ -135,12 +138,13 @@ def _normalisation(self, K, S):
         )
         return norm
 
-    def _weighted_data(self):
+    def _weighted_data(self, indices):
         """Return the weighted data component"""
         # TODO This can all be pre-computed
+        (a, b) = indices
         if not hasattr(self, "weighted_data_CACHE"):
             dw = self.weighted_data_CACHE = (
-                -0.5 * self.data.T @ self.solve(self.C, self.data)
+                -0.5 * (np.array(self.data)/np.sqrt(self.norm_factor))[a[0]:a[1]].T @ self.solve((self.C/self.norm_factor_2)[a[0]:a[1], a[0]:a[1]], self.data[a[0]:a[1]])
             )
         else:
             dw = self.weighted_data_CACHE
@@ -150,22 +154,34 @@ def _weighted_model(self, mu, K):
         """Return the inner product of the GPR mean"""
         return -0.5 * np.array(mu).T @ self.solve(K, mu)
 
-    def _weighted_cross(self, mu, K):
-        a = self.solve(self.C, self.data) + self.solve(K, mu)
-        b = self.inverse_C + self.inverse(K)
-        return 0.5 * a.T @ self.solve(b, a)
+    def _weighted_cross(self, mu, K, indices):
+        # NB the first part of this is repeated elsewhere
+        (a, b) = indices
+        C = (self.C/self.norm_factor_2)[a[0]:a[1], a[0]:a[1]]
+        data = (self.data/self.norm_factor)[a[0]:a[1]]
+
+        A = (self.solve(C, data) - self.solve(K, mu))
+        B = (self.inverse_C*self.norm_factor_2)[a[0]:a[1], a[0]:a[1]] + self.inverse(K)
+        return 0.5 * A.T @ self.solve(B, A)
 
-    def log_likelihood(self, waveform):
-        like = self._weighted_cross(waveform.data, waveform.covariance)
+    def log_likelihood(self, waveform, norm=True):
+        a, b = self.timeseries.determine_overlap(self, waveform)
+
+        wf = np.array(waveform.data)[b[0]:b[1]]
+        wc = waveform.covariance[b[0]:b[1], b[0]:b[1]]
+        wc /= self.norm_factor_2  # np.max(wc)
+        wf /= self.norm_factor  # np.sqrt(np.max(wc))
+
+        like = -self._weighted_cross(wf, wc, indices=(a, b))
         # print("cross term", like)
-        A = self._weighted_data()
+        A = self._weighted_data((a, b))
         # print("data term", A)
-        B = self._weighted_model(waveform.data, waveform.covariance)
+        B = self._weighted_model(wf, wc)
         # print("model term", B)
-        like = like + A + B
-        norm = self._normalisation(waveform.covariance, self.C)
+        like = like - A - B
+        N = self._normalisation(waveform.covariance/self.norm_factor, self.C/self.norm_factor_2)
         # print("normalisation", norm)
-        like += norm
+        like += (N if norm else 0)
 
         return like

@@ -337,6 +353,8 @@ def _weighted_cross(self, mu, K):
         return 0.5 * a.T @ self.solve(b, a)
 
     def log_likelihood(self, waveform):
+        a, b = self.timeseries.determine_overlap(self, waveform)
+
         waveform_d = torch.tensor(waveform.data, device=self.device, dtype=torch.double)
         waveform_c = torch.tensor(
             waveform.covariance, device=self.device, dtype=torch.double
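
A note on the structure (not part of the commit): the data, model, and cross terms computed above are the pieces of a waveform-uncertainty-marginalised Gaussian likelihood. Integrating the Gaussian noise likelihood against the Gaussian process waveform posterior gives

    \log p(d \mid \theta) = -\tfrac{1}{2}\,(d - \mu)^{\mathsf{T}} (C + K)^{-1} (d - \mu) - \tfrac{1}{2} \log \det\big( 2\pi\,(C + K) \big)

where C is the noise covariance and \mu, K are the waveform model's mean and covariance; the three quadratic terms appear to expand this density via the standard product-of-Gaussians identity. The norm_factor rescaling looks like a numerical-conditioning device: dividing the data by sqrt(max(C)) and every covariance by max(C) leaves each whitened quadratic form unchanged and only shifts the log-determinant by a parameter-independent constant, which is why the new tests can pass norm=False without moving the likelihood's maximum. A minimal standalone sketch (plain numpy, not heron code) checking that invariance:

    import numpy as np

    rng = np.random.default_rng(0)
    n = 64
    A = rng.normal(size=(n, n))
    C = A @ A.T + n * np.eye(n)   # stand-in SPD noise covariance
    d = rng.normal(size=n)        # stand-in data vector

    a2 = np.max(C)                # plays the role of norm_factor_2
    a = np.sqrt(a2)               # plays the role of norm_factor

    q_raw = d @ np.linalg.solve(C, d)
    q_scaled = (d / a) @ np.linalg.solve(C / a2, d / a)

    assert np.isclose(q_raw, q_scaled)  # the rescaling cancels in the quadratic form
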
2 changes: 1 addition & 1 deletion heron/models/lalsimulation.py
@@ -198,7 +198,7 @@ def __init__(self, covariance=1e-24):
 
     def time_domain(self, parameters, times=None):
         waveform_dict = super().time_domain(parameters, times)
-        covariance = torch.eye(len(waveform_dict["plus"].times)) * self.covariance
+        covariance = np.eye((len(waveform_dict["plus"].times))) * self.covariance**2
         for wave in waveform_dict.waveforms.values():
             # Artificially add a covariance function to each of these
             wave.covariance = covariance
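
The one-line change above swaps the torch identity matrix for a numpy one and squares the amplitude, consistent with covariance=1e-24 being a per-sample standard deviation while the matrix itself stores variances. A minimal sketch of the resulting fake-uncertainty covariance (n_times is an invented stand-in for len(waveform_dict["plus"].times)):

    import numpy as np

    sigma = 1e-24                            # per-sample strain standard deviation
    n_times = 8                              # stand-in for the number of waveform samples

    covariance = np.eye(n_times) * sigma**2  # diagonal matrix of variances
    assert covariance[0, 0] == sigma**2
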
2 changes: 0 additions & 2 deletions profiling/cuda/cuda_likelihood.py
@@ -9,7 +9,6 @@
 
 def profile_likelihood_pytorch_nouncert():
     waveform = IMRPhenomPv2()
-
     psd_model = AdvancedLIGO()
 
     injections = make_injection_zero_noise(waveform=IMRPhenomPv2,
@@ -40,7 +39,6 @@ def profile_likelihood_pytorch_nouncert():
     log_like = likelihood.log_likelihood(projected_waveform)
 
 
-
 from torch.profiler import profile, record_function, ProfilerActivity
 
 with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:
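
The tail of the profiling script is not shown in this hunk. Assuming prof is the context object bound by the with profile(...) line, a typical way to summarise the recorded activity afterwards would be:

    # Hypothetical continuation: print the hottest ops once the profiled block exits.
    print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
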
5 changes: 2 additions & 3 deletions tests/test_inference.py
@@ -189,7 +189,8 @@ def test_likelihood_numpy_equivalent(self):
 
         self.assertTrue(mass_ratios[np.argmax(log_likes)] == 0.6)
         self.assertTrue(np.all((np.array(log_likes) - np.array(log_likes_n)) < 0.001))
+
 
 
 class Test_Filter(unittest.TestCase):
     """Test that filters can be applied correctly to data."""
@@ -225,8 +226,6 @@ def test_snr(self):
                                    phi_0=0, psi=0,
                                    iota=0)
 
-        f = projected_waveform.plot()
-        f.savefig("projected_waveform.png")
 
         snr = likelihood.snr(projected_waveform)
         self.assertTrue(snr > 40 and snr < 45)
63 changes: 63 additions & 0 deletions tests/test_inference_uncertain.py
@@ -0,0 +1,63 @@
import unittest

import numpy as np
import astropy.units as u
import bilby.gw.prior

from heron.models.lalsimulation import SEOBNRv3, IMRPhenomPv2, IMRPhenomPv2_FakeUncertainty
from heron.models.lalnoise import AdvancedLIGO
from heron.injection import make_injection, make_injection_zero_noise
from heron.detector import Detector, AdvancedLIGOHanford, AdvancedLIGOLivingston, AdvancedVirgo
from heron.likelihood import MultiDetector, TimeDomainLikelihood, TimeDomainLikelihoodModelUncertainty, TimeDomainLikelihoodPyTorch
#, TimeDomainLikelihoodModelUncertaintyPyTorch

from heron.inference import heron_inference, parse_dict, load_yaml

from torch.cuda import is_available

CUDA_NOT_AVAILABLE = not is_available()


class Test_Likelihood_ZeroNoise_With_Uncertainty(unittest.TestCase):
"""
Test likelihoods on a zero noise injection.
"""

def setUp(self):
self.waveform = IMRPhenomPv2_FakeUncertainty()
self.psd_model = AdvancedLIGO()

self.injections = make_injection_zero_noise(waveform=IMRPhenomPv2,
injection_parameters={"distance": 1000*u.megaparsec,
"mass_ratio": 0.6,
"gpstime": 0,
"total_mass": 60 * u.solMass},
detectors={"AdvancedLIGOHanford": "AdvancedLIGO",
"AdvancedLIGOLivingston": "AdvancedLIGO"}
)



def test_likelihood_maximum_at_true_value_mass_ratio(self):

data = self.injections['H1']

likelihood = TimeDomainLikelihoodModelUncertainty(data, psd=self.psd_model)
mass_ratios = np.linspace(0.1, 1.0, 100)

log_likes = []
for mass_ratio in mass_ratios:

test_waveform = self.waveform.time_domain(parameters={"distance": 1000*u.megaparsec,
"mass_ratio": mass_ratio,
"gpstime": 0,
"total_mass": 60 * u.solMass}, times=likelihood.times)
projected_waveform = test_waveform.project(AdvancedLIGOHanford(),
ra=0, dec=0,
gpstime=0,
phi_0=0, psi=0,
iota=0)

log_likes.append(likelihood.log_likelihood(projected_waveform, norm=False))

self.assertTrue(np.abs(mass_ratios[np.argmax(log_likes)] - 0.6) < 0.1)
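
Assuming a working heron installation with its torch and bilby dependencies, this new test module can be run on its own with the standard runner, e.g. python -m unittest tests.test_inference_uncertain.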
