# Source code for simple_pe.likelihood.likelihood

import numpy as np
from scipy import special
from scipy.integrate import quad
import pylab
from simple_pe import fstat


def like_equal_d_cosi(a_hat, f, d, cosi):
    """
    For a network equally sensitive to plus and cross, calculate the
    likelihood marginalized over the 2 phases.  This is a function of d and
    cosi.  Note: we use uniform prior on phi, psi (=1/2pi)

    :param a_hat: the f-stat parameters of the signal
    :param f: the detector response (f = F+ = Fx)
    :param d: distance of template
    :param cosi: cos(inclination) of template
    :returns: the marginalized likelihood at (d, cosi)
    """
    # calculate the two dimensional likelihood in circular polarization
    # marginalized over the two phases
    ar_hat, al_hat = fstat.a_to_circ_amp(a_hat)
    d0 = a_hat[0]
    # left/right circular amplitudes of the template at distance d
    al = (d0 / d) * (1 - cosi) ** 2 / 4
    ar = (d0 / d) * (1 + cosi) ** 2 / 4
    # Bug fix: i0e is in scipy.special, not numpy -- np.special.i0e raised
    # AttributeError.  i0e is the exponentially scaled modified Bessel
    # function, which keeps the product numerically stable for large SNR.
    like = np.exp(- f ** 2 * (al - al_hat) ** 2) * \
        np.exp(- f ** 2 * (ar - ar_hat) ** 2) * \
        special.i0e(2 * f ** 2 * al * al_hat) * \
        special.i0e(2 * f ** 2 * ar * ar_hat)
    return like
def like_equal_cosi(a_hat, f, x, d_max=1000., make_plot=False):
    """
    For a network equally sensitive to plus and cross, calculate the
    likelihood marginalized over the distance and 2 phases.  This is a
    function of cosi.  We use uniform priors, specifically 1/2pi for
    psi, phi, 1/2 for cosi, 3 d^2 dd / d_max^3 for dist.

    :param a_hat: the f-stat parameters of the signal
    :param f: the detector response (f = F+ = Fx)
    :param x: cos(inclination) of template
    :param d_max: maximum distance for marginalization
    :param make_plot: plot the likelihood vs distance
    :returns: the likelihood marginalized over distance and phases
    """
    ar_hat, al_hat = fstat.a_to_circ_amp(a_hat)
    d0 = a_hat[0]
    # choose a sensible range of integration in a = d0/d, centred on the
    # peak of the likelihood with a 5-sigma window
    x4 = (1 + 6 * x ** 2 + x ** 4)
    a_peak = 2 * (al_hat * (1 - x) ** 2 + ar_hat * (1 + x) ** 2) / x4
    a_width = 2. * np.sqrt(2) / (f * np.sqrt(x4))
    # Bug fix: np.max(arr, 0) interprets 0 as the axis argument, not a
    # floor; use the builtin max to clip the lower limit at zero.
    a_lo = max(a_peak - 5 * a_width, 0)
    a_hi = a_peak + 5 * a_width
    # substituting a = d0/d in the volume prior 3 d^2 dd / d_max^3
    # gives 3 d0^3 / (d_max^3 a^4) da
    l_circ = quad(lambda a: (3 * d0 ** 3) / d_max ** 3 * a ** -4 *
                  like_equal_d_cosi(a_hat, f, d0 / a, x),
                  a_lo, a_hi, epsabs=1e-8, epsrel=1e-8)[0]
    if make_plot:
        a_vals = np.linspace(a_lo, a_hi)
        lc = np.zeros_like(a_vals)
        # Bug fix: the original reused the name `a` for both the array and
        # the loop scalar, so pylab.plot was handed a scalar x-axis.
        for i, a in enumerate(a_vals):
            lc[i] = a ** -4 * like_equal_d_cosi(a_hat, f, d0 / a, x)
        pylab.figure()
        pylab.plot(a_vals, lc)
    return l_circ
def like_equal(a_hat, f, d_max=1000):
    """
    Calculate the likelihood for a network equally sensitive to plus and
    cross, marginalized over the two phases, inclination and distance.
    We use uniform priors, specifically 1/2pi for psi, phi, 1/2 for cosi,
    3 d^2 dd / d_max^3 for distance out to d_max.

    :param a_hat: the f-stat parameters of the signal
    :param f: the detector response (f = F+ = Fx)
    :param d_max: maximum distance for marginalization
    :returns: the fully marginalized likelihood
    """
    # integrate over cos(inclination); the factor 1/2 is the cosi prior
    integrand = lambda x: like_equal_cosi(a_hat, f, x, d_max)
    result = quad(integrand, -1., 1., epsabs=1e-8, epsrel=1e-8)[0]
    return 0.5 * result
[docs] def like_parts_d_cosi_psi(a_hat, f_plus, f_cross, x, psi): """ calculate the two dimensional likelihood, marginalized over phi log-likelihood can be written as: 1/2(ahat^2 - 2*d0/d * f(x, psi) * cos(2phi - phi0) + (d0/d)^2 g(x,psi)) return: ahat2, f, g :param a_hat: the F-stat A parameters :param f_plus: F_plus sensitivity :param f_cross: F_cross sensitivity :param x: cos(inclination) :param psi: polarization """ c2p = np.cos(2 * psi) s2p = np.sin(2 * psi) x2 = (1 + x ** 2) / 2 c2phi_fac = a_hat[1] * f_plus ** 2 * x2 * c2p \ + a_hat[2] * f_cross ** 2 * x2 * s2p \ - a_hat[3] * f_plus ** 2 * x * s2p \ + a_hat[4] * f_cross ** 2 * x * c2p s2phi_fac = - a_hat[1] * f_plus ** 2 * x * s2p \ + a_hat[2] * f_cross ** 2 * x * c2p \ - a_hat[3] * f_plus ** 2 * x2 * c2p \ - a_hat[4] * f_cross ** 2 * x2 * s2p f = np.sqrt(c2phi_fac ** 2 + s2phi_fac ** 2) g = f_plus ** 2 * x2 ** 2 * c2p ** 2 + f_cross ** 2 * x2 ** 2 * s2p ** 2 \ + f_plus ** 2 * x ** 2 * s2p ** 2 + f_cross ** 2 * x ** 2 * c2p ** 2 f_resp = np.array([0, f_plus, f_cross, f_plus, f_cross]) ahat2 = sum(f_resp ** 2 * a_hat ** 2) return ahat2, f, g
def like_d_cosi_psi(a_hat, f_plus, f_cross, d, x, psi, marg=True):
    """
    Return the likelihood marginalized over phi, using flat (1/2pi) prior

    :param a_hat: the F-stat A parameters
    :param f_plus: F_plus sensitivity
    :param f_cross: F_cross sensitivity
    :param d: distance
    :param x: cos(inclination)
    :param psi: polarization
    :param marg: do or don't do the marginalization
    :returns: the marginalization factor.  I don't think it includes the
        exp(rho^2/2) term.
    """
    ahat2, f, g = like_parts_d_cosi_psi(a_hat, f_plus, f_cross, x, psi)
    # template amplitude scale a = d0/d
    amp = a_hat[0] / d
    # Marginalizing over phi (from zero to 2 pi) gives:
    # 2 pi i0e(a f) exp(-1/2(ahat^2 - 2 f a + g a^2))
    envelope = np.exp(f * amp - 0.5 * (ahat2 + g * amp ** 2))
    if not marg:
        return envelope
    return envelope * special.i0e(f * amp)
def like_d_cosi(a_hat, f_plus, f_cross, d, x):
    """
    Return the likelihood marginalized over phi and psi, with a uniform
    (1/2pi) prior on both

    :param a_hat: the F-stat A parameters
    :param f_plus: F_plus sensitivity
    :param f_cross: F_cross sensitivity
    :param d: distance
    :param x: cos(inclination)
    :returns: the likelihood marginalized over both angles
    """
    # integrate over psi in [0, pi/2]; the symmetry of the likelihood in
    # psi means this quarter-period suffices, hence the 2/pi normalization
    integral = quad(
        lambda p: like_d_cosi_psi(a_hat, f_plus, f_cross, d, x, p),
        0, np.pi / 2)[0]
    return 2 / np.pi * integral
def like_cosi_psi(a_hat, f_plus, f_cross, x, psi, d_max=1000.):
    """
    Return the likelihood marginalized over d and phi, using flat (1/2pi)
    prior on phi; uniform volume on d up to d_max.

    :param a_hat: the F-stat A parameters
    :param f_plus: F_plus sensitivity
    :param f_cross: F_cross sensitivity
    :param x: cos(inclination)
    :param psi: polarization
    :param d_max: maximum distance for marginalization
    :returns: the likelihood marginalized over distance and phase
    """
    ahat2, f, g = like_parts_d_cosi_psi(a_hat, f_plus, f_cross, x, psi)
    d0 = a_hat[0]
    # Marginalizing over phi gives:
    # 2 pi i0e(a f) exp(-1/2(ahat^2 - 2 f a + g a^2))
    # Substituting a = d0/d in the volume prior 3 d^2 dd / d_max^3 gives
    # 3 d0^3 / (d_max^3 a^4) da.
    # Bug fixes: (1) i0e is in scipy.special -- np.special.i0e raised
    # AttributeError, so this function previously always crashed;
    # (2) the prior prefactor used d0**2 where the Jacobian of a = d0/d
    # (and the matching factor in like_equal_cosi) requires d0**3.
    def integrand(a):
        return (3 * d0 ** 3 / d_max ** 3 * a ** (-4) *
                special.i0e(f * a) *
                np.exp(f * a - 0.5 * (ahat2 + g * a ** 2)))

    # integrate a 5-sigma window around the Gaussian peak at a = f/g,
    # clipped below at zero
    lower = max(0, f / g - 5 / np.sqrt(g))
    upper = f / g + 5 / np.sqrt(g)
    return quad(integrand, lower, upper)[0]
def like_cosi(a_hat, f_plus, f_cross, x, d_max=1000.):
    """
    Return the likelihood marginalized over d, phi and psi.  Use uniform
    (1/2pi) prior on phi, psi, uniform in volume prior over d out to d_max.

    :param a_hat: the F-stat A parameters
    :param f_plus: F_plus sensitivity
    :param f_cross: F_cross sensitivity
    :param x: cos(inclination)
    :param d_max: maximum distance for marginalization
    :returns: the likelihood at cos(inclination) = x
    """
    # integrate out the polarization angle over a quarter period,
    # normalized by the 1/2pi prior (hence the factor 2/pi)
    integrand = lambda p: like_cosi_psi(a_hat, f_plus, f_cross, x, p, d_max)
    integral = quad(integrand, 0, np.pi / 2, epsabs=1e-8, epsrel=1e-8)[0]
    return 2 / np.pi * integral
def like(a_hat, f_plus, f_cross, d_max=1000.):
    """
    Return the likelihood marginalized over all 4 f-stat parameters.  Use
    uniform (1/2pi) prior on phi, psi, uniform (1/2) on cosi, uniform in
    volume prior over d out to d_max.

    :param a_hat: the F-stat A parameters
    :param f_plus: F_plus sensitivity
    :param f_cross: F_cross sensitivity
    :param d_max: maximum distance for marginalization
    :returns: the fully marginalized likelihood
    """
    # the leading 0.5 is the uniform prior on cos(inclination)
    integral = quad(lambda x: like_cosi(a_hat, f_plus, f_cross, x, d_max),
                    -1, 1, epsabs=1e-8, epsrel=1e-8)[0]
    return 0.5 * integral
def like_from_snr(zh, zl, sigh, sigl, dt, dt_ring, f_plus_cross):
    """Calculate the likelihood from observed SNRs for a 2 detector network

    Parameters
    ----------
    zh: np.complex
        The complex matched filter SNR for detector 1
    zl: np.complex
        The complex matched filter SNR for detector 2
    sigh: float
        sigma for detector 1
    sigl: float
        sigma for detector 2
    dt: float
        observed difference in merger time between the two detectors
    dt_ring: np.ndarray
        array containing the difference in merger time for ring in the sky
    f_plus_cross: np.ndarray
        array containing the detector sensitivity along the ring in the sky
    """
    dmax = np.linalg.norm(np.array([sigh, sigl]))
    z = np.array((zh, zl))
    # pick the point on the sky ring whose time delay best matches the
    # observed delay between the detectors
    idx = (abs(dt_ring - dt)).argmin()
    f_pc = f_plus_cross[idx]
    sigma = np.array([[sigh], [sigl]])

    n_points = len(f_pc)
    lik = np.zeros(n_points)
    coherent = np.zeros(n_points)
    left = np.zeros(n_points)
    right = np.zeros(n_points)

    for j, f in enumerate(f_pc):
        f_sig = f * sigma
        # convert SNRs to F-stat A parameters in the dominant polarization
        a = fstat.snr_f_to_a(z, f_sig)
        fp, fc, chi = fstat.dominant_polarization(f_sig)
        dist, cosi, psi, phi = fstat.a_to_params(a)
        a_dp = fstat.params_to_a(dist, cosi, psi - chi, phi)
        lik[j], parts = like_approx(a_dp, fp, fc, dmax)
        coherent[j] = parts["coh"]
        left[j] = parts["left"]
        right[j] = parts["right"]

    return lik, coherent, left, right
def loglike_approx(a_hat, f_plus, f_cross, d_max=1000., method="coh",
                   correction=False):
    """
    Calculate the approximate likelihood.  This works for three cases:
    left and right circularly polarized and the standard coherent analysis.
    This reproduces the equations (A.18) and (A.29) from "Localization of
    transient gravitational wave sources: beyond triangulation",
    Class. Quantum Grav. 35 (2018) 105002

    :param a_hat: the F-stat A parameters
    :param f_plus: F_plus sensitivity
    :param f_cross: F_cross sensitivity
    :param d_max: maximum distance for marginalization
    :param method: approximation for calculating likelihood, one of
        "coh", "left", "right"
    :param correction: scale the approx likelihood to improve fit
    :returns: (loglike, snr) -- the approximate log-likelihood and the
        expected SNR of the (possibly projected) signal
    """
    if method in ("left", "right"):
        # project onto the chosen circular polarization before evaluating
        a_hat = fstat.circ_project(a_hat, f_plus, f_cross, method)
    d_hat, cosi_hat, _, _ = fstat.a_to_params(a_hat)
    snr = fstat.expected_snr(a_hat, f_plus, f_cross)

    if snr == 0:
        loglike = 0
    elif method == "coh":
        # eq. (A.18): coherent marginalization over all four parameters
        loglike = np.log(24 * (d_hat / d_max) ** 3 * d_hat ** 4
                         / (f_plus ** 2 * f_cross ** 2)
                         / (1 - cosi_hat ** 2) ** 3)
    else:
        # eq. (A.29): circular polarization; width of the likelihood
        # in cos(iota), capped at unity
        cos_fac = np.sqrt(np.sqrt(2) * (f_cross ** 2 + f_plus ** 2)
                          / (f_plus * f_cross))
        cos_width = np.minimum(cos_fac / snr ** 0.5, 1)
        cos_int = 1 - (1 - cos_width) ** 4
        loglike = (np.log(3) - np.log(8) + 3 * np.log(d_hat / d_max)
                   - 2 * np.log(snr) + np.log(cos_int))

    if correction:
        # empirical rescaling to improve the fit of the approximation
        loglike += np.log(1 - 75. / (2 * snr ** 2)) + 0.2

    return loglike, snr
def like_approx(a_hat, f_plus, f_cross, d_max=1000., correction=False):
    """
    Calculate the approximate likelihood summed over left, right and
    coherent.

    :param a_hat: the F-stat A parameters
    :param f_plus: F_plus sensitivity
    :param f_cross: F_cross sensitivity
    :param d_max: maximum distance for marginalization
    :param correction: scale the approx likelihood to improve fit
    :returns: (combined, like) -- the logaddexp-combined likelihood and a
        dict of the per-method contributions
    """
    snr = {}
    like = {}
    for method in ("left", "right", "coh"):
        ll, rho = loglike_approx(a_hat, f_plus, f_cross, d_max,
                                 method=method, correction=correction)
        snr[method] = rho
        # include the exp(rho^2/2) term in log space
        like[method] = rho ** 2 / 2 + ll
        # drop contributions below the SNR threshold
        if rho < 6:
            like[method] = 0

    # suppress the coherent term when it is indistinguishable from a
    # circularly polarized signal
    coh2 = snr["coh"] ** 2
    if (coh2 - snr["right"] ** 2 < 2) or (coh2 - snr["left"] ** 2 < 2):
        like["coh"] = 0

    # renamed from `like_approx`, which shadowed this function's own name
    combined = np.logaddexp(np.logaddexp(like["left"], like["right"]),
                            like["coh"])
    return combined, like