Start getting recent stat changes into module
GarethCabournDavies committed Apr 18, 2024
1 parent d71442e commit 3c73ec6
Showing 1 changed file with 41 additions and 27 deletions.
68 changes: 41 additions & 27 deletions pycbc/events/stat.py
@@ -760,8 +760,10 @@ def coinc_lim_for_thresh(
         if not self.has_hist:
             self.get_hist()
 
-        fixed_stat = [b["snglstat"] for a, b in sngls_list if a != limifo][0]
-        s1 = thresh**2.0 - fixed_stat**2.0
+        fixed_stat_sq = sum(
+            [b["snglstat"] ** 2 for a, b in sngls_list if a != limifo]
+        )
+        s1 = thresh ** 2.0 - fixed_stat_sq
         # Assume best case scenario and use maximum signal rate
         s1 -= 2.0 * self.hist_max
         s1[s1 < 0] = 0
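As a rough illustration of what the updated limit computes (hypothetical numbers, plain NumPy, ignoring the signal-rate term above): the squared single-detector statistics of all detectors other than the limiting one are now summed before being subtracted from the squared threshold, instead of using a single fixed statistic.

```python
import numpy

# Hypothetical single-detector statistics for the non-limiting detectors;
# in the real method these come from the sngls_list argument.
sngls_list = [
    ("H1", {"snglstat": numpy.array([5.0, 6.0])}),
    ("V1", {"snglstat": numpy.array([4.0, 3.0])}),
]
limifo = "L1"
thresh = 9.0

# Sum of squared statistics over every detector except the limiting one
fixed_stat_sq = sum(b["snglstat"] ** 2 for a, b in sngls_list if a != limifo)
s1 = thresh ** 2.0 - fixed_stat_sq   # [40., 36.]
s1[s1 < 0] = 0
print(numpy.sqrt(s1))                # approximate limiting statistic: [6.32..., 6.]
```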
@@ -838,6 +840,9 @@ def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs):
             self.kwargs.get("minimum_statistic_cutoff", -30.0)
         )
 
+        # This will be used to keep track of the template number being used
+        self.curr_tnum = None
+
         # Go through the keywords and add class information as needed:
         if self.kwargs["sensitive_volume"]:
             # Add network sensitivity benchmark
@@ -850,7 +855,6 @@ def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs):
                 axis=0,
             )
             self.benchmark_logvol = 3.0 * numpy.log(hl_net_med_sigma)
-            self.curr_tnum = None
 
         if self.kwargs["dq"]:
             # Reweight the noise rate by the dq reweighting factor
@@ -959,11 +963,6 @@ def setup_segments(self, key):
     def find_dq_noise_rate(self, trigs, dq_state):
         """Get dq values for a specific ifo and dq states"""
 
-        try:
-            tnum = trigs.template_num
-        except AttributeError:
-            tnum = trigs["template_id"]
-
         try:
             ifo = trigs.ifo
         except AttributeError:
@@ -976,10 +975,10 @@ def find_dq_noise_rate(self, trigs, dq_state):
 
         if ifo in self.dq_rates_by_state:
             for i, st in enumerate(dq_state):
-                if isinstance(tnum, numpy.ndarray):
-                    bin_name = self.dq_bin_by_tid[ifo][tnum[i]]
+                if isinstance(self.curr_tnum, numpy.ndarray):
+                    bin_name = self.dq_bin_by_tid[ifo][self.curr_tnum[i]]
                 else:
-                    bin_name = self.dq_bin_by_tid[ifo][tnum]
+                    bin_name = self.dq_bin_by_tid[ifo][self.curr_tnum]
                 dq_val[i] = self.dq_rates_by_state[ifo][bin_name][st]
         return dq_val
 
@@ -1100,8 +1099,8 @@ def find_fits(self, trigs):
 
         # fits_by_tid is a dictionary of dictionaries of arrays
         # indexed by ifo / coefficient name / template_id
-        alphai = self.fits_by_tid[ifo]["smoothed_fit_coeff"][tnum]
-        ratei = self.fits_by_tid[ifo]["smoothed_rate_above_thresh"][tnum]
+        alphai = self.fits_by_tid[ifo]["smoothed_fit_coeff"][self.curr_tnum]
+        ratei = self.fits_by_tid[ifo]["smoothed_rate_above_thresh"][self.curr_tnum]
         thresh = self.fits_by_tid[ifo]["thresh"]
 
         return alphai, ratei, thresh
@@ -1162,6 +1161,15 @@ def lognoiserate(self, trigs):
         lognoisel: numpy.array
             Array of log noise rate density for each input trigger.
         """
+        # What is the template number currently being used?
+        try:
+            # exists if accessed via coinc_findtrigs, this is a number
+            self.curr_tnum = trigs.template_num
+        except AttributeError:
+            # exists for SingleDetTriggers & pycbc_live get_coinc,
+            # this is a numpy array
+            self.curr_tnum = trigs["template_id"]
+
         alphai, ratei, thresh = self.find_fits(trigs)
         sngl_stat = self.get_sngl_ranking(trigs)
         lognoisel = (
@@ -1237,14 +1245,6 @@ def single(self, trigs):
         if self.kwargs["sensitive_volume"]:
             # populate fields to allow sensitive volume factor calculation
             singles["sigmasq"] = trigs["sigmasq"][:]
-            try:
-                # exists if accessed via coinc_findtrigs
-                self.curr_tnum = trigs.template_num
-            except AttributeError:
-                # exists for SingleDetTriggers
-                self.curr_tnum = trigs["template_id"]
-            # Should only be one ifo fit file provided
-            assert len(self.ifos) == 1
             # Store benchmark log volume as single-ifo information since
             # the ranking methods do not have access to template id
             singles["benchmark_logvol"] = self.benchmark_logvol[self.curr_tnum]
@@ -1256,9 +1256,14 @@ def single(self, trigs):
         if self.kwargs["chirp_mass"]:
             from pycbc.conversions import mchirp_from_mass1_mass2
 
-            self.curr_mchirp = mchirp_from_mass1_mass2(
-                trigs.param["mass1"], trigs.param["mass2"]
-            )
+            try:
+                mass1 = trigs.param['mass1']
+                mass2 = trigs.param['mass2']
+            except AttributeError:
+                mass1 = trigs['mass1']
+                mass2 = trigs['mass2']
+            self.curr_mchirp = mchirp_from_mass1_mass2(mass1, mass2)
+
         return numpy.array(singles, ndmin=1)
 
     def sensitive_volume_factor(self, sngls):
@@ -1323,7 +1328,11 @@ def logsignalrate_shared(self, sngls_info):
 
         if self.kwargs["chirp_mass"]:
             # chirp mass reweighting
-            mchirp = min(self.curr_mchirp, self.mcm)
+            if isinstance(self.curr_mchirp, numpy.ndarray):
+                mchirp = numpy.minimum(self.curr_mchirp, self.mcm)
+            else:
+                # curr_mchirp will be a number
+                mchirp = min(self.curr_mchirp, self.mcm)
             sr_factor += numpy.log((mchirp / 20.0) ** (11.0 / 3.0))
 
         if self.kwargs["kde"]:
@@ -1400,8 +1409,11 @@ def rank_stat_coinc(
         # Basic noise rate: sum of noise rates multiplied by the
         # window they can form coincidences in
         ln_noise_rate = coinc_rate.combination_noise_lograte(
-            sngl_dict, kwargs["time_addition"]
+            sngl_dict,
+            kwargs["time_addition"],
+            dets=kwargs.get('dets', None),
         )
+
         ln_noise_rate -= self.benchmark_lograte
 
         # Basic option is not to have any signal-based assumptions,
@@ -1414,7 +1426,9 @@
             # normalized
             # Extent of time-difference space occupied
             noise_twindow = coinc_rate.multiifo_noise_coincident_area(
-                self.hist_ifos, kwargs["time_addition"]
+                self.hist_ifos,
+                kwargs["time_addition"],
+                dets=kwargs.get('dets', None),
             )
             # Volume is the allowed time difference window, multiplied by 2pi
             # for each phase difference dimension and by allowed range of SNR
