From 6db484e1e76f23d409cef984f08520673626cbc7 Mon Sep 17 00:00:00 2001
From: hoangstephanie <105010876+hoangstephanie@users.noreply.github.com>
Date: Fri, 21 Apr 2023 10:14:42 +0200
Subject: [PATCH 01/32] Make autogating work for the PyCBC Live early-warning search (#4298)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Apply autogating on the last 16 s of the buffer instead of the analysis
  chunk only. Add two command-line options to the PyCBC Live run script to
  set the length and overlap in seconds of the segments used to estimate
  the PSD with Welch's method

* Fix help messages of --autogating-psd-duration and --autogating-psd-stride

* Set autogating_psd_duration and autogating_psd_stride in the __init__
  function of the StrainBuffer class

* Fix a missed change of detect_loud_glitches(strain) to
  detect_loud_glitches(self.strain)

* Change --autogating-psd-duration and --autogating-psd-stride from
  type=int to type=float

* Add documentation of the new kwargs autogating_psd_duration and
  autogating_psd_stride

* Rename the option --autogating-psd-duration to
  --autogating-psd-segment-length. Add the --autogating-duration option to
  the PyCBC Live run script to set the amount of data to apply autogating
  on; the default value is 16 s. Move the variables
  'autogating_duration_length' and 'autogating_start_sample' inside the
  'if self.autogating_threshold is not None' block, since those variables
  are only used inside that block.

* Add missing comma

* Fix typo

* Update the comment, since the --autogating-duration option has been
  added and 16 s is no longer a hardcoded value.
---
 bin/pycbc_live         | 11 +++++++++++
 pycbc/strain/strain.py | 21 +++++++++++++++++++--
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/bin/pycbc_live b/bin/pycbc_live
index 78fceaa6306..c5bf3b07b3d 100755
--- a/bin/pycbc_live
+++ b/bin/pycbc_live
@@ -790,6 +790,17 @@ parser.add_argument('--autogating-taper', type=float, metavar='SECONDS',
                     help='Taper the strain before and after '
                          'each gating window over a duration '
                          'of SECONDS.')
+parser.add_argument('--autogating-duration', type=float, default=16,
+                    metavar='SECONDS',
+                    help='Amount of data in seconds to apply autogating on.')
+parser.add_argument('--autogating-psd-segment-length', type=float, default=2,
+                    metavar='SECONDS',
+                    help='Length in seconds of each segment used to estimate the PSD '
+                         "with Welch's method and median averaging.")
+parser.add_argument('--autogating-psd-stride', type=float, default=1,
+                    metavar='SECONDS',
+                    help='Length in seconds of the overlap between each segment used '
+                         "to estimate the PSD with Welch's method and median averaging.")
 parser.add_argument('--sync', action='store_true',
                     help="Imposes an MPI synchronization at each transfer of"
diff --git a/pycbc/strain/strain.py b/pycbc/strain/strain.py
index 0eea0e7c9e9..2c90de6b9a9 100644
--- a/pycbc/strain/strain.py
+++ b/pycbc/strain/strain.py
@@ -1433,6 +1433,9 @@ def __init__(self, frame_src, channel_name, start_time,
                  autogating_pad=None,
                  autogating_width=None,
                  autogating_taper=None,
+                 autogating_duration=None,
+                 autogating_psd_segment_length=None,
+                 autogating_psd_stride=None,
                  state_channel=None,
                  data_quality_channel=None,
                  idq_channel=None,
@@ -1490,6 +1493,12 @@ def __init__(self, frame_src, channel_name, start_time,
             Half-duration of the zeroed-out portion of autogates.
         autogating_taper: float, Optional
             Duration of taper on either side of the gating window in seconds.
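A rough sketch of the buffer slicing this patch sets up in StrainBuffer.advance(): the autogating duration selects only the most recent seconds of the strain buffer for glitch detection. The names and numbers below are illustrative only, not the actual PyCBC code:

    import numpy as np

    def autogating_segment(strain, sample_rate, duration=16.0, corruption=0):
        # Keep only the most recent `duration` seconds of the buffer for
        # glitch detection, dropping `corruption` corrupted samples at the end
        start = int(len(strain) - duration * sample_rate)
        stop = len(strain) - corruption if corruption else len(strain)
        return strain[start:stop]

    buf = np.zeros(4096 * 64)                    # 64 s of fake 4096 Hz data
    seg = autogating_segment(buf, 4096, corruption=1024)
    print(len(seg) / 4096.0)                     # 15.75 s
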
+ autogating_duration: float, Optional + Amount of data in seconds to apply autogating on. + autogating_psd_segment_length: float, Optional + The length in seconds of each segment used to estimate the PSD with Welch's method. + autogating_psd_stride: float, Optional + The overlap in seconds between each segment used to estimate the PSD with Welch's method. state_channel: {str, None}, Optional Channel to use for state information about the strain data_quality_channel: {str, None}, Optional @@ -1598,6 +1607,9 @@ def __init__(self, frame_src, channel_name, start_time, self.autogating_pad = autogating_pad self.autogating_width = autogating_width self.autogating_taper = autogating_taper + self.autogating_duration = autogating_duration + self.autogating_psd_segment_length = autogating_psd_segment_length + self.autogating_psd_stride = autogating_psd_stride self.gate_params = [] self.sample_rate = sample_rate @@ -1894,9 +1906,11 @@ def advance(self, blocksize, timeout=10): # apply gating if needed if self.autogating_threshold is not None: + autogating_duration_length = self.autogating_duration * self.sample_rate + autogating_start_sample = int(len(self.strain) - autogating_duration_length) glitch_times = detect_loud_glitches( - strain[:-self.corruption], - psd_duration=2., psd_stride=1., + self.strain[autogating_start_sample:-self.corruption], + psd_duration=self.autogating_psd_segment_length, psd_stride=self.autogating_psd_stride, threshold=self.autogating_threshold, cluster_window=self.autogating_cluster, low_freq_cutoff=self.highpass_frequency, @@ -1968,6 +1982,9 @@ def from_cli(cls, ifo, args, maxlen): autogating_pad=args.autogating_pad, autogating_width=args.autogating_width, autogating_taper=args.autogating_taper, + autogating_duration=args.autogating_duration, + autogating_psd_segment_length=args.autogating_psd_segment_length, + autogating_psd_stride=args.autogating_psd_stride, psd_abort_difference=args.psd_abort_difference, psd_recalculate_difference=args.psd_recalculate_difference, force_update_cache=args.force_update_cache, From dfd8bddd7cb4e575797801ba007ebe722ab115e4 Mon Sep 17 00:00:00 2001 From: Thomas Dent Date: Thu, 27 Apr 2023 21:54:06 +0200 Subject: [PATCH 02/32] Add mchirp / tau0 direct conversions (#4337) * Add mchirp / tau0 direct conversions * Consistency tests for mchirp-tau0 conversion --- pycbc/conversions.py | 40 ++++++++++++++++++++++++++++++---------- test/test_conversions.py | 2 ++ 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/pycbc/conversions.py b/pycbc/conversions.py index 3cadef1f2d9..fb75a13f71d 100644 --- a/pycbc/conversions.py +++ b/pycbc/conversions.py @@ -140,12 +140,12 @@ def invq_from_mass1_mass2(mass1, mass2): def eta_from_mass1_mass2(mass1, mass2): """Returns the symmetric mass ratio from mass1 and mass2.""" - return mass1*mass2 / (mass1+mass2)**2. + return mass1*mass2 / (mass1 + mass2)**2. def mchirp_from_mass1_mass2(mass1, mass2): """Returns the chirp mass from mass1 and mass2.""" - return eta_from_mass1_mass2(mass1, mass2)**(3./5) * (mass1+mass2) + return eta_from_mass1_mass2(mass1, mass2)**(3./5) * (mass1 + mass2) def mass1_from_mtotal_q(mtotal, q): @@ -155,7 +155,7 @@ def mass1_from_mtotal_q(mtotal, q): (heavier) mass. If q < 1, the returned mass will be the secondary (lighter) mass. """ - return q*mtotal / (1.+q) + return q*mtotal / (1. + q) def mass2_from_mtotal_q(mtotal, q): @@ -165,7 +165,7 @@ def mass2_from_mtotal_q(mtotal, q): (lighter) mass. If q < 1, the returned mass will be the primary (heavier) mass. 
""" - return mtotal / (1.+q) + return mtotal / (1. + q) def mass1_from_mtotal_eta(mtotal, eta): @@ -185,7 +185,7 @@ def mass2_from_mtotal_eta(mtotal, eta): def mtotal_from_mchirp_eta(mchirp, eta): """Returns the total mass from the chirp mass and symmetric mass ratio. """ - return mchirp / (eta**(3./5.)) + return mchirp / eta**(3./5.) def mass1_from_mchirp_eta(mchirp, eta): @@ -219,7 +219,7 @@ def _mass2_from_mchirp_mass1(mchirp, mass1): This has 3 solutions but only one will be real. """ a = mchirp**5 / mass1**3 - roots = numpy.roots([1,0,-a,-a*mass1]) + roots = numpy.roots([1, 0, -a, -a * mass1]) # Find the real one real_root = roots[(abs(roots - roots.real)).argmin()] return real_root.real @@ -261,7 +261,7 @@ def _mass_from_knownmass_eta(known_mass, eta, known_is_secondary=False, float The other component mass. """ - roots = numpy.roots([eta, (2*eta - 1)*known_mass, eta*known_mass**2.]) + roots = numpy.roots([eta, (2*eta - 1) * known_mass, eta * known_mass**2.]) if force_real: roots = numpy.real(roots) if known_is_secondary: @@ -298,25 +298,28 @@ def eta_from_q(q): Note that the mass ratio may be either < 1 or > 1. """ - return q / (1.+q)**2 + return q / (1. + q)**2 def mass1_from_mchirp_q(mchirp, q): """Returns the primary mass from the given chirp mass and mass ratio.""" - mass1 = (q**(2./5.))*((1.0 + q)**(1./5.))*mchirp + mass1 = q**(2./5.) * (1.0 + q)**(1./5.) * mchirp return mass1 + def mass2_from_mchirp_q(mchirp, q): """Returns the secondary mass from the given chirp mass and mass ratio.""" - mass2 = (q**(-3./5.))*((1.0 + q)**(1./5.))*mchirp + mass2 = q**(-3./5.) * (1.0 + q)**(1./5.) * mchirp return mass2 + def _a0(f_lower): """Used in calculating chirp times: see Cokelaer, arxiv.org:0706.4437 appendix 1, also lalinspiral/python/sbank/tau0tau3.py. """ return 5. / (256. * (numpy.pi * f_lower)**(8./3.)) + def _a3(f_lower): """Another parameter used for chirp times""" return numpy.pi / (8. * (numpy.pi * f_lower)**(5./3.)) @@ -332,6 +335,15 @@ def tau0_from_mtotal_eta(mtotal, eta, f_lower): return _a0(f_lower) / (mtotal**(5./3.) * eta) +def tau0_from_mchirp(mchirp, f_lower): + r"""Returns :math:`\tau_0` from the chirp mass and the given frequency. + """ + # convert to seconds + mchirp = mchirp * lal.MTSUN_SI + # formulae from arxiv.org:0706.4437 + return _a0(f_lower) / mchirp ** (5./3.) + + def tau3_from_mtotal_eta(mtotal, eta, f_lower): r"""Returns :math:`\tau_0` from the total mass, symmetric mass ratio, and the given frequency. @@ -358,6 +370,14 @@ def tau3_from_mass1_mass2(mass1, mass2, f_lower): return tau3_from_mtotal_eta(mtotal, eta, f_lower) +def mchirp_from_tau0(tau0, f_lower): + r"""Returns chirp mass from :math:`\tau_0` and the given frequency. + """ + mchirp = (_a0(f_lower) / tau0) ** (3./5.) 
# in seconds + # convert back to solar mass units + return mchirp / lal.MTSUN_SI + + def mtotal_from_tau0_tau3(tau0, tau3, f_lower, in_seconds=False): r"""Returns total mass from :math:`\tau_0, \tau_3`.""" diff --git a/test/test_conversions.py b/test/test_conversions.py index 1b721098064..0e32f14189b 100644 --- a/test/test_conversions.py +++ b/test/test_conversions.py @@ -126,8 +126,10 @@ def test_round_robin(self): (conversions.eta_from_q, (self.q,), 'eta'), (conversions.mass1_from_mchirp_q, (self.mchirp, self.q), 'mp'), (conversions.mass2_from_mchirp_q, (self.mchirp, self.q), 'ms'), + (conversions.tau0_from_mchirp, (self.mchirp, self.f_lower), 'tau0'), (conversions.tau0_from_mass1_mass2, (self.m1, self.m2, self.f_lower), 'tau0'), (conversions.tau3_from_mass1_mass2, (self.m1, self.m2, self.f_lower), 'tau3'), + (conversions.mchirp_from_tau0, (self.tau0, self.f_lower), 'mchirp'), (conversions.mtotal_from_tau0_tau3, (self.tau0, self.tau3, self.f_lower), 'mtotal'), (conversions.eta_from_tau0_tau3, (self.tau0, self.tau3, self.f_lower), 'eta'), (conversions.mass1_from_tau0_tau3, (self.tau0, self.tau3, self.f_lower), 'mp'), From 298677c25ba53f2fc9bd9600da33bd6a979526ba Mon Sep 17 00:00:00 2001 From: Thomas Dent Date: Wed, 17 May 2023 13:30:25 +0200 Subject: [PATCH 03/32] missed update to __all__ (#4357) No-one spotted this ... --- pycbc/conversions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pycbc/conversions.py b/pycbc/conversions.py index fb75a13f71d..d4c67ecb957 100644 --- a/pycbc/conversions.py +++ b/pycbc/conversions.py @@ -1625,6 +1625,7 @@ def nltides_gw_phase_diff_isco(f_low, f0, amplitude, n, m1, m2): 'mass1_from_mass2_eta', 'eta_from_q', 'mass1_from_mchirp_q', 'mass2_from_mchirp_q', 'tau0_from_mtotal_eta', 'tau3_from_mtotal_eta', 'tau0_from_mass1_mass2', + 'tau0_from_mchirp', 'mchirp_from_tau0', 'tau3_from_mass1_mass2', 'mtotal_from_tau0_tau3', 'eta_from_tau0_tau3', 'mass1_from_tau0_tau3', 'mass2_from_tau0_tau3', 'primary_spin', 'secondary_spin', From 968912a8fc07ac9055a7a3b7a5ccfe94e45f5bfe Mon Sep 17 00:00:00 2001 From: Thomas Dent Date: Tue, 9 May 2023 16:45:57 +0200 Subject: [PATCH 04/32] Use constant chirp time window in snr optimizer (#4338) * Use constant chirp time window for mchirp bounds * syntax error is bad * update snr-max branch (#4343) * workaround for search issue and rtd theme (#4340) * Add mchirp / tau0 direct conversions (#4337) * Add mchirp / tau0 direct conversions * Consistency tests for mchirp-tau0 conversion --------- Co-authored-by: Alex Nitz * add apparently needed import * adjust eta bounds to be sane/useful * if max mtotal is 500 max mchirp should be closer to 200 --------- Co-authored-by: Alex Nitz --- bin/pycbc_optimize_snr | 67 ++++++++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 25 deletions(-) diff --git a/bin/pycbc_optimize_snr b/bin/pycbc_optimize_snr index 8c6b05059d1..aac49d176a3 100755 --- a/bin/pycbc_optimize_snr +++ b/bin/pycbc_optimize_snr @@ -15,10 +15,7 @@ from pycbc import ( from pycbc.types import MultiDetOptionAction, zeros, load_frequencyseries from pycbc.filter import matched_filter_core import pycbc.waveform.bank -from pycbc.conversions import ( - mchirp_from_mass1_mass2, mtotal_from_mchirp_eta, - mass1_from_mtotal_eta, mass2_from_mtotal_eta -) +import pycbc.conversions as cv from pycbc.io import live from pycbc.io.hdf import load_hdf5_to_dict from pycbc.detector import Detector @@ -29,6 +26,12 @@ try: except: ps = None +# Set a minimum mass for points tried in optimization allowing for +# minimal slop 
relative to the lightest template +MIN_CPT_MASS = 0.99 + +# Set a large maximum total mass +MAX_MTOTAL = 500. Nfeval = 0 start_time = time.time() @@ -56,12 +59,12 @@ def compute_network_snr_core(v, *argv, raise_err=False): delta_f = argv[7] sample_rate = argv[8] distance = 1.0 / DYN_RANGE_FAC - mtotal = mtotal_from_mchirp_eta(v[0], v[1]) - mass1 = mass1_from_mtotal_eta(mtotal, v[1]) - mass2 = mass2_from_mtotal_eta(mtotal, v[1]) + mtotal = cv.mtotal_from_mchirp_eta(v[0], v[1]) + mass1 = cv.mass1_from_mtotal_eta(mtotal, v[1]) + mass2 = cv.mass2_from_mtotal_eta(mtotal, v[1]) # enforce broadly accepted search space boundaries - if mass1 < 1 or mass2 < 1 or mtotal > 500: + if mass1 < MIN_CPT_MASS or mass2 < MIN_CPT_MASS or mtotal > MAX_MTOTAL: return -numpy.inf, {} try: @@ -70,7 +73,7 @@ def compute_network_snr_core(v, *argv, raise_err=False): approximant=approximant, mass1=mass1, mass2=mass2, spin1z=v[2], spin2z=v[3], f_lower=flow, f_final=f_end, delta_f=delta_f, - delta_t=1.0/sample_rate, distance=distance) + delta_t=1./sample_rate, distance=distance) except RuntimeError: if raise_err: raise @@ -119,8 +122,8 @@ def compute_minus_network_snr(v, *argv): def compute_minus_network_snr_pso(v, *argv, **kwargs): argv = kwargs['args'] - nsnr_array = numpy.array([-compute_network_snr_core(v_i, *argv)[0] for v_i in v]) - return nsnr_array + nsnr_array = numpy.array([compute_network_snr_core(v_i, *argv)[0] for v_i in v]) + return -nsnr_array def optimize_di(bounds, cli_args, extra_args): @@ -224,6 +227,10 @@ parser.add_argument('--approximant', required=True, parser.add_argument('--snr-threshold', default=4.0, help='If the SNR in ifo X is below this threshold do not ' 'consider it part of the coincidence. Not implemented') +parser.add_argument('--chirp-time-f-lower', default=20., + help='Starting frequency for chirp time window (Hz).') +parser.add_argument('--chirp-time-window', default=2., + help='Chirp time window (s).') parser.add_argument('--gracedb-server', metavar='URL', help='URL of GraceDB server API for uploading events. ' 'If not provided, the default URL is used.') @@ -327,19 +334,29 @@ sample_rate = fp['sample_rate'][()] extra_args = [data, coinc_times, coinc_ifos, flen, approximant, flow, f_end, delta_f, sample_rate] -mchirp = mchirp_from_mass1_mass2( - fp['mass1'][()], - fp['mass2'][()] -) -minchirp = mchirp * (1 - mchirp / 50.0) -maxchirp = mchirp * (1 + mchirp / 50.0) -minchirp = 1 if minchirp < 1 else minchirp -maxchirp = 80 if maxchirp > 80 else maxchirp - -# boundary of the optimization space (dict of (min, max) tuples) +# Determine chirp mass bounds from constant chirp time window +tau0flow = args.chirp_time_f_lower +tau0 = cv.tau0_from_mass1_mass2(fp['mass1'][()], fp['mass2'][()], tau0flow) +# Arbitrarily set max mchirp to 200 Msun if tau0 - window is too small +mintau0 = max(tau0 - args.chirp_time_window, + cv.tau0_from_mchirp(200., tau0flow)) +maxtau0 = tau0 + args.chirp_time_window +minchirp = cv.mchirp_from_tau0(maxtau0, tau0flow) +maxchirp = cv.mchirp_from_tau0(mintau0, tau0flow) +# Check basic sanity +assert minchirp > 0.5 +assert minchirp < maxchirp +assert maxchirp < 250. + +# Establish minimum eta: find the most asymmetric mass point +# nb function name is 'mass2' but it doesn't enforce m2 < m1! 
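The chirp-mass bounds computed just below rely on the tau0/mchirp conversions added in patch 02. A self-contained sketch of that round trip, with the value of lal.MTSUN_SI inlined as a plain constant so it runs with numpy alone:

    import numpy as np

    MTSUN_SI = 4.925491025543576e-06   # solar mass in seconds (lal.MTSUN_SI)

    def a0(f_lower):
        return 5. / (256. * (np.pi * f_lower) ** (8. / 3.))

    def tau0_from_mchirp(mchirp, f_lower):
        return a0(f_lower) / (mchirp * MTSUN_SI) ** (5. / 3.)

    def mchirp_from_tau0(tau0, f_lower):
        return (a0(f_lower) / tau0) ** (3. / 5.) / MTSUN_SI

    # a fixed +/-2 s tau0 window maps to asymmetric chirp-mass bounds
    tau0 = tau0_from_mchirp(1.2, 20.)
    print(mchirp_from_tau0(tau0 + 2., 20.), mchirp_from_tau0(tau0 - 2., 20.))
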
+maxm1 = cv._mass2_from_mchirp_mass1(maxchirp, MIN_CPT_MASS) +mineta = max(cv.eta_from_mass1_mass2(maxm1, MIN_CPT_MASS), 0.01) + +# Boundaries of the optimization space: dict of (min, max) tuples bounds = { 'mchirp': (minchirp, maxchirp), - 'eta': (0.01, 0.2499), + 'eta': (mineta, 0.24999999999), 'spin1z': (-0.9, 0.9), 'spin2z': (-0.9, 0.9) } @@ -360,9 +377,9 @@ with scheme_context: _, snr_series_dict = compute_network_snr_core(opt_params, *extra_args) -mtotal = mtotal_from_mchirp_eta(opt_params[0], opt_params[1]) -mass1 = mass1_from_mtotal_eta(mtotal, opt_params[1]) -mass2 = mass2_from_mtotal_eta(mtotal, opt_params[1]) +mtotal = cv.mtotal_from_mchirp_eta(opt_params[0], opt_params[1]) +mass1 = cv.mass1_from_mtotal_eta(mtotal, opt_params[1]) +mass2 = cv.mass2_from_mtotal_eta(mtotal, opt_params[1]) spin1z = opt_params[2] spin2z = opt_params[3] From 8c0ecf43545e874940b28b79dedfcbebb7c07977 Mon Sep 17 00:00:00 2001 From: Tito Dal Canton Date: Wed, 17 May 2023 16:39:05 +0200 Subject: [PATCH 05/32] Fix edge case in PyCBC Live's check for horizon distance (#4362) * Fix edge case in check for horizon distance * Suggestion by Gareth; improve docstring & comments --- bin/pycbc_live | 11 ++++------- pycbc/strain/strain.py | 21 +++++++++++++++++++++ 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/bin/pycbc_live b/bin/pycbc_live index c5bf3b07b3d..3a0389c25ed 100755 --- a/bin/pycbc_live +++ b/bin/pycbc_live @@ -1092,13 +1092,10 @@ with ctx: else: psd_count[ifo] -= 1 - if data_reader[ifo].psd is not None: - dist = data_reader[ifo].psd.dist - if dist < args.min_psd_abort_distance or dist > args.max_psd_abort_distance: - logging.info("%s PSD dist %s outside acceptable range [%s, %s]", - ifo, dist, args.min_psd_abort_distance, - args.max_psd_abort_distance) - status = False + status &= data_reader[ifo].check_psd_dist( + args.min_psd_abort_distance, + args.max_psd_abort_distance + ) if status is True: evnt.live_detectors.add(ifo) diff --git a/pycbc/strain/strain.py b/pycbc/strain/strain.py index 2c90de6b9a9..2793012d565 100644 --- a/pycbc/strain/strain.py +++ b/pycbc/strain/strain.py @@ -1711,6 +1711,27 @@ def recalculate_psd(self): logging.info("Recalculating %s PSD, %s", self.detector, psd.dist) return True + def check_psd_dist(self, min_dist, max_dist): + """Check that the horizon distance of a detector is within a required + range. If so, return True, otherwise log a warning and return False. + """ + if self.psd is None: + # ignore check + return True + # Note that the distance can in principle be inf or nan, e.g. if h(t) + # is identically zero. The check must fail in those cases. Be careful + # with how the logic works out when comparing inf's or nan's! 
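The warning in the comment above is worth spelling out: IEEE comparisons against nan are always False, so phrasing the check positively makes nan (and inf) fail it, whereas the inverted phrasing would silently accept nan. A small standalone illustration:

    import math

    def dist_ok(dist, min_dist, max_dist):
        # nan fails both comparisons and is rejected; the inverted form
        # `not (dist < min_dist or dist > max_dist)` would wrongly accept nan
        return dist >= min_dist and dist <= max_dist

    for d in (50., math.inf, math.nan):
        print(d, dist_ok(d, 20., 100.))   # True, False, False
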
+ good = self.psd.dist >= min_dist and self.psd.dist <= max_dist + if not good: + logging.info( + "%s PSD dist %s outside acceptable range [%s, %s]", + self.detector, + self.psd.dist, + min_dist, + max_dist + ) + return good + def overwhitened_data(self, delta_f): """ Return overwhitened data From 6a6912462e852a66ba1cab3002f0a189dfaae29b Mon Sep 17 00:00:00 2001 From: Gareth S Cabourn Davies Date: Thu, 25 May 2023 10:39:30 +0100 Subject: [PATCH 06/32] Live single-trigger combined fits date order bug (#4369) * Fix bug where counts/coefficients were not in the right order, make plots a bit prettier * TDC comments * Reorder operations --- bin/live/pycbc_live_combine_single_fits | 42 +++++----- bin/live/pycbc_live_plot_combined_single_fits | 76 +++++++++++-------- 2 files changed, 70 insertions(+), 48 deletions(-) diff --git a/bin/live/pycbc_live_combine_single_fits b/bin/live/pycbc_live_combine_single_fits index d65cfc9b963..4db23d03d1f 100644 --- a/bin/live/pycbc_live_combine_single_fits +++ b/bin/live/pycbc_live_combine_single_fits @@ -60,7 +60,8 @@ with h5py.File(args.trfits_files[0], 'r') as fit_f0: live_times = {ifo : [] for ifo in args.ifos} -trigger_file_times = [] +trigger_file_starts = [] +trigger_file_ends = [] n_files = len(args.trfits_files) logging.info("Checking through %d files", n_files) @@ -77,16 +78,18 @@ for f in args.trfits_files: assert all(fits_f['bins_lower'][:] == bl) assert all(fits_f['bins_upper'][:] == bu) - # Get the time of the last trigger in the trigger_fits file + # Get the time of the first/last triggers in the trigger_fits file gps_last = 0 + gps_first = np.inf for ifo in args.ifos: if ifo not in fits_f: continue else: trig_times = fits_f[ifo]['triggers']['end_time'][:] - gps_last = max(gps_last, - trig_times.max()) - trigger_file_times.append(gps_last) + gps_last = max(gps_last, trig_times.max()) + gps_first = min(gps_first, trig_times.min()) + trigger_file_starts.append(gps_first) + trigger_file_ends.append(gps_last) for ifo in args.ifos: if ifo not in fits_f: @@ -102,16 +105,16 @@ for f in args.trfits_files: logging.info(fits_f[ifo + '/fit_coeff'][:]) fits_f.close() -# Set up the date array, this is stored as an offset from the start date of -# the combination to the end of the trigger_fits file. 
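The ordering bug fixed in this patch comes down to keeping per-file arrays aligned when sorting files by time. A minimal illustration of the argsort pattern used throughout the change, with made-up numbers:

    import numpy as np

    starts = np.array([300., 100., 200.])    # first-trigger GPS time per file
    counts = np.array([12, 7, 9])            # per-file counts, same file order

    order = np.argsort(starts)               # time ordering of the files
    print(starts[order] - starts[order][0])  # [  0. 100. 200.]
    print(counts[order])                     # [ 7  9 12], aligned with the times
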
-# This allows for missing or empty days +# Set up the date array, this is stored as an offset from the first trigger time of +# the first file to the last trigger of the file -trigger_file_times = np.array(trigger_file_times) -ad_order = np.argsort(np.array(trigger_file_times)) -start_date_n = trigger_file_times[ad_order[0]] -ad = trigger_file_times[ad_order] - start_date_n +trigger_file_starts = np.array(trigger_file_starts) +trigger_file_ends = np.array(trigger_file_ends) +ad_order = np.argsort(trigger_file_starts) +start_time_n = trigger_file_starts[ad_order[0]] +ad = trigger_file_ends[ad_order] - start_time_n -# Get the counts and alphas sorted by bin rather than by date +# Get the counts and alphas counts_bin = {ifo: [c for c in zip(*counts_all[ifo])] for ifo in args.ifos} alphas_bin = {ifo: [a for a in zip(*alphas_all[ifo])] for ifo in args.ifos} @@ -126,7 +129,7 @@ fout.attrs['fit_threshold'] = fit_thresh fout.attrs['conservative_percentile'] = args.conservative_percentile fout.attrs['ifos'] = args.ifos fout['bins_edges'] = list(bl) + [bu[-1]] -fout['fits_dates'] = ad + start_date_n +fout['fits_dates'] = ad + start_time_n for ifo in args.ifos: fout.create_group(ifo) @@ -143,13 +146,16 @@ for ifo in args.ifos: alpha_all = np.mean(counts_bin[ifo], axis=0) / invalphan_all meant = l_times.mean() - fout_ifo[f'separate_fits/live_times'] = l_times - fout_ifo[f'separate_fits/date'] = ad + start_date_n + fout_ifo[f'separate_fits/live_times'] = l_times[ad_order] + fout_ifo[f'separate_fits/start_time'] = trigger_file_starts[ad_order] + fout_ifo[f'separate_fits/end_time'] = trigger_file_ends[ad_order] + for counter, a_c_u_l in enumerate(zip(alphas_bin[ifo], counts_bin[ifo], bu, bl)): a, c, u, l = a_c_u_l - a = np.array(a) - c = np.array(c) + # Sort alpha and counts by date + a = np.array(a)[ad_order] + c = np.array(c)[ad_order] invalphan = c / a mean_alpha = c.mean() / invalphan.mean() cons_alpha = np.percentile(a, 100 - args.conservative_percentile) diff --git a/bin/live/pycbc_live_plot_combined_single_fits b/bin/live/pycbc_live_plot_combined_single_fits index 3b16be3eac2..c5db51af499 100644 --- a/bin/live/pycbc_live_plot_combined_single_fits +++ b/bin/live/pycbc_live_plot_combined_single_fits @@ -63,7 +63,8 @@ live_total = {} separate_alphas = {} separate_counts = {} -separate_dates = {} +separate_starts = {} +separate_ends = {} separate_times = {} logging.info("Loading Data") @@ -72,7 +73,6 @@ with h5py.File(args.combined_fits_file, 'r') as cff: bins_edges = cff['bins_edges'][:] conservative_percentile = cff.attrs['conservative_percentile'] n_bins = len(bins_edges) - 1 - fits_dates = cff['fits_dates'][:] for ifo in ifos: logging.info(ifo) live_total[ifo] = cff[ifo].attrs['live_time'] @@ -81,7 +81,8 @@ with h5py.File(args.combined_fits_file, 'r') as cff: cons_count[ifo] = cff[ifo]['conservative']['counts'][:] cons_alpha[ifo] = cff[ifo]['conservative']['fit_coeff'][:] - separate_dates[ifo] = cff[ifo]['separate_fits']['date'][:] + separate_starts[ifo] = cff[ifo]['separate_fits']['start_time'][:] + separate_ends[ifo] = cff[ifo]['separate_fits']['end_time'][:] separate_times[ifo] = cff[ifo]['separate_fits']['live_times'][:] separate_data = cff[ifo]['separate_fits'] @@ -108,6 +109,20 @@ def bin_proportion(upper, lower, log_spacing=False): else: return ((lower + upper) / 2. 
- bin_min) / (bin_max - bin_min) +# Set up the x ticks - note that these are rounded to the nearest +# midnight, so may not line up exactly with the data +min_start = min([separate_starts[ifo].min() for ifo in ifos]) +max_end = max([separate_ends[ifo].max() for ifo in ifos]) + +xtix = [] +xtix_labels = [] +t = min_start +while t < max_end: + # Strip off the time information, ticks are at midnight + time_dt = gpstime.gps_to_utc(t).date() + xtix_labels.append(time_dt.strftime("%Y-%m-%d")) + xtix.append(gpstime.utc_to_gps(time_dt).gpsSeconds) + t += 86400 logging.info("Plotting fits information") for ifo in ifos: @@ -119,24 +134,23 @@ for ifo in ifos: alpha_lines = [] count_lines = [] - start_date_dt = gpstime.gps_to_utc(separate_dates[ifo][0]) - start_date = start_date_dt.strftime("%Y-%m-%d %H:%M:%S") - for i, bl_bu in enumerate(zip(bin_starts, bin_ends)): bl, bu = bl_bu alphas = separate_alphas[ifo][i] counts = separate_counts[ifo][i] + # replace zeros with infinity, so that it is + # not plotted rather than plotted as zero valid = np.logical_and(alphas > 0, np.isfinite(alphas)) - + alphas[np.logical_not(valid)] = np.inf if not any(valid): + logging.warning("No valid fit coefficients for %s", ifo) continue - a = alphas[valid] - l_times = separate_times[ifo][valid] - rate = counts[valid] / l_times - ad = (separate_dates[ifo][valid] - separate_dates[ifo][0]) / 86400. + + l_times = separate_times[ifo] + rate = counts / l_times ma = mean_alpha[ifo][i] ca = cons_alpha[ifo][i] @@ -146,20 +160,19 @@ for ifo in ifos: bin_prop = bin_proportion(bu, bl, log_spacing=args.log_colormap) bin_colour = plt.get_cmap(args.colormap)(bin_prop) - - alpha_lines += ax_alpha.plot(ad, a, c=bin_colour, - label="duration %.2f-%.2f" % (bl, bu)) + bin_label = f"duration {bl:.2f}-{bu:.2f}" + alpha_lines += ax_alpha.plot(separate_starts[ifo], alphas, c=bin_colour, + label=bin_label) alpha_lines.append(ax_alpha.axhline(ma, label="total fit = %.2f" % ma, - c=bin_colour, linestyle='--',)) + c=bin_colour, linestyle='--',)) alpha_lab = f"{conservative_percentile:d}th %ile = {ca:.2f}" alpha_lines.append(ax_alpha.axhline(ca, c=bin_colour, linestyle=':', label=alpha_lab)) - count_lines += ax_count.plot(ad, rate, - c=bin_colour, - label="duration %.2f-%.2f" % (bl, bu)) + count_lines += ax_count.plot(separate_starts[ifo], rate, c=bin_colour, + label=bin_label) count_lines.append(ax_count.axhline(mr, c=bin_colour, linestyle='--', label=f"mean = {mr:.3f}")) @@ -168,21 +181,24 @@ for ifo in ifos: c=bin_colour, linestyle=':', label=count_lab)) - ax_alpha.set_xlabel('Days since ' + start_date) - ax_alpha.set_ylabel('Fit coefficient') - ax_alpha.set_xlim([ad[0], ad[-1]]) alpha_labels = [l.get_label() for l in alpha_lines] - ax_alpha.grid(zorder=-30) ax_alpha.legend(alpha_lines, alpha_labels, loc='lower center', - ncol=3, bbox_to_anchor=(0.5, 1.01)) - fig_alpha.tight_layout() - fig_alpha.savefig(args.output_plot_name_format.format(ifo=ifo, type='fit_coeffs')) - ax_count.set_xlabel('Days since ' + start_date) - ax_count.set_ylabel('Counts per live time') - ax_count.set_xlim([ad[0], ad[-1]]) + ncol=5, bbox_to_anchor=(0.5, 1.01)) + ax_alpha.set_ylabel('Fit coefficient') + count_labels = [l.get_label() for l in count_lines] ax_count.legend(count_lines, count_labels, loc='lower center', - ncol=3, bbox_to_anchor=(0.5, 1.01)) - ax_count.grid(zorder=-30) + ncol=5, bbox_to_anchor=(0.5, 1.01)) + ax_count.set_ylabel('Counts per live time') + + for ax in [ax_count, ax_alpha]: + ax.set_xticks(xtix) + ax.set_xticklabels(xtix_labels, rotation=90) + # 
Add 1/4 day padding either side + ax.set_xlim(xtix[0] - 21600, xtix[-1] + 21600) + ax.grid(zorder=-30) + fig_count.tight_layout() fig_count.savefig(args.output_plot_name_format.format(ifo=ifo, type='counts')) + fig_alpha.tight_layout() + fig_alpha.savefig(args.output_plot_name_format.format(ifo=ifo, type='fit_coeffs')) From 67cc123f858f7cdad7f3d659ed9d7ca7facf8f42 Mon Sep 17 00:00:00 2001 From: Gareth S Cabourn Davies Date: Mon, 17 Jul 2023 08:20:38 +0100 Subject: [PATCH 07/32] Add argument verification for singles (#4365) * Add argument verification for singles * Apply suggestions from code review * remove --enable-single-detector-background * Some more option-checking, and making fixed ifar option actually possible! * fix options in example * CC, typo * CC --- bin/pycbc_live | 10 ++-- examples/live/check_results.py | 7 ++- examples/live/run.sh | 3 +- pycbc/events/single.py | 92 +++++++++++++++++++++++++++------- 4 files changed, 86 insertions(+), 26 deletions(-) diff --git a/bin/pycbc_live b/bin/pycbc_live index 3a0389c25ed..157c27549b3 100755 --- a/bin/pycbc_live +++ b/bin/pycbc_live @@ -860,8 +860,6 @@ parser.add_argument('--ifar-double-followup-threshold', type=float, required=Tru parser.add_argument('--pvalue-combination-livetime', type=float, required=True, help="Livetime used for p-value combination with followup " "detectors, in years") -parser.add_argument('--enable-single-detector-background', action='store_true', default=False) - parser.add_argument('--enable-gracedb-upload', action='store_true', default=False, help='Upload triggers to GraceDB') parser.add_argument('--enable-production-gracedb-upload', action='store_true', default=False, @@ -920,8 +918,11 @@ mchirp_area.insert_args(parser) livepau.insert_live_pastro_option_group(parser) args = parser.parse_args() + scheme.verify_processing_options(args, parser) fft.verify_fft_options(args, parser) +ifos = set(args.channel_name.keys()) +analyze_singles = LiveSingle.verify_args(args, parser, ifos) if args.output_background is not None and len(args.output_background) != 2: parser.error('--output-background takes two parameters: period and path') @@ -948,7 +949,6 @@ if bank.min_f_lower < args.low_frequency_cutoff: 'minimum f_lower across all templates ' '({} Hz)'.format(args.low_frequency_cutoff, bank.min_f_lower)) -ifos = set(args.channel_name.keys()) logging.info('Analyzing data from detectors %s', ppdets(ifos)) evnt = LiveEventManager(args, bank) @@ -1023,7 +1023,7 @@ with ctx: evnt.data_readers = data_reader # create single-detector background "estimators" - if args.enable_single_detector_background and evnt.rank == 0: + if analyze_singles and evnt.rank == 0: sngl_estimator = {ifo: LiveSingle.from_cli(args, ifo) for ifo in ifos} @@ -1160,7 +1160,7 @@ with ctx: evnt.check_coincs(list(results.keys()), best_coinc, psds) # Check for singles - if args.enable_single_detector_background: + if analyze_singles: evnt.check_singles(results, psds) gates = {ifo: data_reader[ifo].gate_params for ifo in data_reader} diff --git a/examples/live/check_results.py b/examples/live/check_results.py index 413e643ac62..31089efbe28 100755 --- a/examples/live/check_results.py +++ b/examples/live/check_results.py @@ -11,6 +11,7 @@ from pycbc.io.ligolw import LIGOLWContentHandler from ligo.lw.utils import load_filename as load_xml_doc from ligo.lw import lsctables +from pycbc import conversions as conv def close(a, b, c): @@ -130,7 +131,8 @@ def check_found_events(args): # create field array to store properties of triggers dtype = [('mass1', float), 
('mass2', float), ('spin1z', float), ('spin2z', float), - ('tc', float), ('net_snr', float)] + ('tc', float), ('net_snr', float), + ('ifar', float)] trig_props = FieldArray(n_found, dtype=dtype) # store properties of found triggers @@ -139,18 +141,21 @@ def check_found_events(args): xmldoc = load_xml_doc( ctrigfp, False, contenthandler=LIGOLWContentHandler) si_table = lsctables.SnglInspiralTable.get_table(xmldoc) + ci_table = lsctables.CoincInspiralTable.get_table(xmldoc) trig_props['tc'][x] = si_table[0].end trig_props['mass1'][x] = si_table[0].mass1 trig_props['mass2'][x] = si_table[0].mass2 trig_props['spin1z'][x] = si_table[0].spin1z trig_props['spin2z'][x] = si_table[0].spin2z + trig_props['ifar'][x] = conv.sec_to_year(1 / ci_table[0].combined_far) snr_list = si_table.getColumnByName('snr').asarray() trig_props['net_snr'][x] = sum(snr_list ** 2) ** 0.5 log.info('Single-detector SNRs: %s', snr_list) log.info('Network SNR: %f', trig_props['net_snr'][x]) + log.info('IFAR: %f', trig_props['ifar'][x]) log.info('Merger time: %f', trig_props['tc'][x]) log.info('Mass 1: %f', trig_props['mass1'][x]) log.info('Mass 2: %f', trig_props['mass2'][x]) diff --git a/examples/live/run.sh b/examples/live/run.sh index 249382f8296..3cda2773664 100755 --- a/examples/live/run.sh +++ b/examples/live/run.sh @@ -195,12 +195,11 @@ python -m mpi4py `which pycbc_live` \ --src-class-eff-to-lum-distance 0.74899 \ --src-class-lum-distance-to-delta -0.51557 -0.32195 \ --run-snr-optimization \ ---enable-single-detector-background \ +--sngl-ifar-est-dist conservative \ --single-newsnr-threshold 9 \ --single-duration-threshold 7 \ --single-reduced-chisq-threshold 2 \ --single-fit-file single_trigger_fits.hdf \ ---sngl-ifar-est-dist conservative \ --verbose # note that, at this point, some SNR optimization processes may still be diff --git a/pycbc/events/single.py b/pycbc/events/single.py index 266e853de48..5312466fb57 100644 --- a/pycbc/events/single.py +++ b/pycbc/events/single.py @@ -57,29 +57,84 @@ def insert_args(parser): 'coefficients and counts for specific ' 'single trigger IFAR fitting.') parser.add_argument('--sngl-ifar-est-dist', nargs='+', - default='conservative', - choices=['conservative', 'mean', 'fixed'], action=MultiDetOptionAction, help='Which trigger distribution to use when ' 'calculating IFAR of single triggers. ' - 'Default conservative. Can be given as ' - 'a single value or as detector-value pairs, ' - 'e.g. H1:mean L1:mean V1:conservative') + 'Can be given as a single value or as ' + 'detector-value pairs, e.g. 
H1:mean '
+                             'L1:mean V1:conservative')
+
+    @staticmethod
+    def verify_args(args, parser, ifos):
+        sngl_opts = [args.single_reduced_chisq_threshold,
+                     args.single_duration_threshold,
+                     args.single_newsnr_threshold,
+                     args.sngl_ifar_est_dist]
+        sngl_opts_str = ("--single-reduced-chisq-threshold, "
+                         "--single-duration-threshold, "
+                         "--single-newsnr-threshold, "
+                         "--sngl-ifar-est-dist")
+
+        if any(sngl_opts) and not all(sngl_opts):
+            parser.error(f"Single detector trigger options ({sngl_opts_str}) "
+                         "must either all be given or none.")
+
+        if args.enable_single_detector_upload \
+                and not args.enable_gracedb_upload:
+            parser.error("--enable-single-detector-upload requires "
+                         "--enable-gracedb-upload to be set.")
+
+        sngl_optional_opts = [args.single_fixed_ifar,
+                              args.single_fit_file]
+        sngl_optional_opts_str = ("--single-fixed-ifar, "
+                                  "--single-fit-file")
+        if any(sngl_optional_opts) and not all(sngl_opts):
+            parser.error("Optional singles options "
+                         f"({sngl_optional_opts_str}) given but no "
+                         f"required options ({sngl_opts_str}) are.")
+
+        for ifo in ifos:
+            # Check which option(s) are needed for each IFO and if they exist:
+
+            # Notes for the logic here:
+            # args.sngl_ifar_est_dist.default_set is True if a single value
+            # has been set to be the same for all detectors
+            # bool(args.sngl_ifar_est_dist) is True if the option is given
+            if args.sngl_ifar_est_dist and \
+                    not args.sngl_ifar_est_dist.default_set \
+                    and not args.sngl_ifar_est_dist[ifo]:
+                # Option has been given, different for each IFO,
+                # and this one is not present
+                parser.error("All IFOs required in --sngl-ifar-est-dist "
+                             "if IFO-specific options are given.")
+
+            if args.sngl_ifar_est_dist[ifo] != 'fixed':
+                if not args.single_fit_file:
+                    # Fixed IFAR option doesn't need the fits file
+                    parser.error("Single detector trigger fits file must be "
+                                 "given if --sngl-ifar-est-dist is not "
+                                 f"fixed for all ifos (at least {ifo} has "
+                                 f"option {args.sngl_ifar_est_dist[ifo]}).")
+                if ifo in args.single_fixed_ifar:
+                    parser.error(f"Value {args.single_fixed_ifar[ifo]} given "
+                                 f"for {ifo} in --single-fixed-ifar, but "
+                                 f"--sngl-ifar-est-dist for {ifo} "
+                                 f"is {args.sngl_ifar_est_dist[ifo]}, not "
+                                 "fixed.")
+            else:
+                # Check that the fixed IFAR value has actually been
+                # given if using this instead of a distribution
+                if not args.single_fixed_ifar[ifo]:
+                    parser.error("--single-fixed-ifar must be "
+                                 "given if --sngl-ifar-est-dist is fixed. 
" + f"This is true for at least {ifo}.") + + # Return value is a boolean whether we are analysing singles or not + # The checks already performed mean that all(sngl_opts) is okay + return all(sngl_opts) @classmethod def from_cli(cls, args, ifo): - sngl_opts_required = all([args.single_fit_file, - args.single_reduced_chisq_threshold, - args.single_duration_threshold, - args.single_newsnr_threshold]) - if args.enable_single_detector_background and not sngl_opts_required: - raise RuntimeError("Single detector trigger options " - "(--single-fit-file, " - "--single-reduced-chisq-threshold, " - "--single-duration-threshold, " - "--single-newsnr-threshold) " - "must ALL be given if single detector " - "background is enabled") return cls( ifo, newsnr_threshold=args.single_newsnr_threshold[ifo], reduced_chisq_threshold=args.single_reduced_chisq_threshold[ifo], @@ -139,7 +194,8 @@ def check(self, trigs, data_reader): return candidate def calculate_ifar(self, sngl_ranking, duration): - if self.fixed_ifar: + logging.info("Calculating IFAR") + if self.fixed_ifar and self.ifo in self.fixed_ifar: return self.fixed_ifar[self.ifo] try: From 82e5a8e341b82395405b9fa5e8f2cd6e2abfe403 Mon Sep 17 00:00:00 2001 From: Arthur Tolley <32394213+ArthurTolley@users.noreply.github.com> Date: Tue, 13 Jun 2023 10:56:46 +0100 Subject: [PATCH 08/32] Added an OSError exception to MKL check (#4390) --- pycbc/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pycbc/__init__.py b/pycbc/__init__.py index b7a2d4aad27..53c35debfff 100644 --- a/pycbc/__init__.py +++ b/pycbc/__init__.py @@ -158,7 +158,7 @@ def makedir(path): try: import pycbc.fft.mkl HAVE_MKL=True -except ImportError: +except (ImportError, OSError): HAVE_MKL=False # Check for openmp suppport, currently we pressume it exists, unless on From 6a1e8ae813f73150877656b0208fcfc81737947f Mon Sep 17 00:00:00 2001 From: Ian Harry Date: Mon, 26 Jun 2023 21:07:58 +0100 Subject: [PATCH 09/32] Move PyCBC venvs to new IGWN CVMFS server (#4412) * First part of moving venv build to igwn * TESTING HACK. REVERT ME BEFORE MERGING!!! * Try this * Testing * Revert "Testing" This reverts commit c0b9ea0c28117284adbaf84bebfcfa3167ec60eb. 
* Add new CVMFS commands * Remove rsync filter which isn't working * Add error wrap abort handler * Update tools/venv_transfer_commands.sh Co-authored-by: Duncan Macleod * Fix Ian's idiocy (attempt to) and HostChecking * Set to abort for one last test * One final publish test * Add a chmod on the root CVMFS directory * Prepare for merging * Fix mistake --------- Co-authored-by: Duncan Macleod --- tools/docker_build_dist.sh | 8 +++++--- tools/docker_build_prepssh.sh | 1 + tools/venv_transfer_commands.sh | 12 ++++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 tools/venv_transfer_commands.sh diff --git a/tools/docker_build_dist.sh b/tools/docker_build_dist.sh index 3a02d056f22..db20424bb36 100755 --- a/tools/docker_build_dist.sh +++ b/tools/docker_build_dist.sh @@ -112,9 +112,11 @@ EOF echo -e "\\n>> [`date`] Deploying release ${SOURCE_TAG} to CVMFS" # remove lalsuite source and deploy on cvmfs rm -rf ${VENV_PATH}/src/lalsuite - ssh ouser.ligo@oasis-login.opensciencegrid.org "mkdir -p /home/login/ouser.ligo/ligo/deploy/sw/pycbc/${ENV_OS}/virtualenv/pycbc-${SOURCE_TAG}" - rsync --rsh=ssh $RSYNC_OPTIONS -qraz ${VENV_PATH}/ ouser.ligo@oasis-login.opensciencegrid.org:/home/login/ouser.ligo/ligo/deploy/sw/pycbc/${ENV_OS}/virtualenv/pycbc-${SOURCE_TAG}/ - ssh ouser.ligo@oasis-login.opensciencegrid.org osg-oasis-update + export RSYNC_OPTIONS VENV_PATH ENV_OS SOURCE_TAG + if ! bash /pycbc/tools/venv_transfer_commands.sh; then + ssh cvmfs.pycbc@cvmfs-software.ligo.caltech.edu "sudo -u repo.software cvmfs_server abort -f" + exit 1 + fi fi echo -e "\\n>> [`date`] virtualenv deployment complete" fi diff --git a/tools/docker_build_prepssh.sh b/tools/docker_build_prepssh.sh index 864f385e7b6..9c39bc63d9f 100644 --- a/tools/docker_build_prepssh.sh +++ b/tools/docker_build_prepssh.sh @@ -6,4 +6,5 @@ echo -e "Host sugwg-test1.phy.syr.edu\n\tStrictHostKeyChecking no\n" >> ~/.ssh/c echo -e "Host sugwg-condor.phy.syr.edu\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config ; echo -e "Host oasis-login.opensciencegrid.org\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config ; echo -e "Host code.pycbc.phy.syr.edu\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config ; +echo -e "@cert-authority * ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHa03AZF3CvJ1C4Po15swSaMYI4kPszyBH/uOKHQYvu+EpehSfMZMaX5D7pUpc5cAXvMEEFzlZJQH4pOioIlqyE= IGWN_CIT_SSH_CERT_AUTHORITY" >> ~/.ssh/known_hosts chmod 600 ~/.ssh/id_rsa ~/.ssh/config ~/.ssh/ldg_user ~/.ssh/ldg_token diff --git a/tools/venv_transfer_commands.sh b/tools/venv_transfer_commands.sh new file mode 100644 index 00000000000..0d793bb1907 --- /dev/null +++ b/tools/venv_transfer_commands.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e + +# Please don't try and run this script directly! 
+ssh cvmfs.pycbc@cvmfs-software.ligo.caltech.edu < Date: Wed, 28 Jun 2023 10:47:06 +0100 Subject: [PATCH 10/32] Allow SNR optimizer to use candidate point in initial array (#4393) * allow SNR optimizer to use the candidate point in the initial array of test points --- bin/pycbc_optimize_snr | 85 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 73 insertions(+), 12 deletions(-) diff --git a/bin/pycbc_optimize_snr b/bin/pycbc_optimize_snr index aac49d176a3..670d7432de1 100755 --- a/bin/pycbc_optimize_snr +++ b/bin/pycbc_optimize_snr @@ -126,13 +126,28 @@ def compute_minus_network_snr_pso(v, *argv, **kwargs): return -nsnr_array -def optimize_di(bounds, cli_args, extra_args): - bounds = [ +def normalize_initial_point(initial_point, bounds): + return (initial_point - bounds[:,0]) / (bounds[:,1] - bounds[:,0]) + +def optimize_di(bounds, cli_args, extra_args, initial_point): + bounds = numpy.array([ bounds['mchirp'], bounds['eta'], bounds['spin1z'], bounds['spin2z'] - ] + ]) + + + # Currently only implemented for random seed initial array + rng = numpy.random.mtrand._rand + population_shape=(cli_args.di_popsize, 4) + population = rng.uniform(size=population_shape) + if cli_args.include_candidate_in_optimizer: + # Re-normalize the initial point into the correct range + point_init = normalize_initial_point(initial_point, bounds) + # add the initial point to the population + population = numpy.concatenate((population[:-1], point_init)) + results = differential_evolution( compute_minus_network_snr, bounds, @@ -142,12 +157,13 @@ def optimize_di(bounds, cli_args, extra_args): mutation=(0.5, 1), recombination=0.7, callback=callback_func, - args=extra_args + args=extra_args, + init=population ) return results.x -def optimize_shgo(bounds, cli_args, extra_args): +def optimize_shgo(bounds, cli_args, extra_args, initial_point): # pylint: disable=unused-argument bounds = [ bounds['mchirp'], bounds['eta'], @@ -164,30 +180,48 @@ def optimize_shgo(bounds, cli_args, extra_args): ) return results.x +def normalize_population(population, min_bounds, max_bounds): + norm_pop = min_bounds + population * (max_bounds - min_bounds) + + return norm_pop -def optimize_pso(bounds, cli_args, extra_args): +def optimize_pso(bounds, cli_args, extra_args, initial_point): options = { 'c1': cli_args.pso_c1, 'c2': cli_args.pso_c2, 'w': cli_args.pso_w } - min_bounds = [ + min_bounds = numpy.array([ bounds['mchirp'][0], bounds['eta'][0], bounds['spin1z'][0], bounds['spin2z'][0] - ] - max_bounds = [ + ]) + max_bounds = numpy.array([ bounds['mchirp'][1], bounds['eta'][1], bounds['spin1z'][1], bounds['spin2z'][1] - ] + ]) + + # Manually generate the initial points, this is the same as the default + # method, but allows us to make some modifications + population = numpy.random.uniform( + low=0.0, high=1.0, size=(cli_args.pso_particles, 4) + ) + population = normalize_population(population, min_bounds, max_bounds) + + if cli_args.include_candidate_in_optimizer: + # add the initial point to the population + population = numpy.concatenate((population[:-1], + initial_point)) + optimizer = ps.single.GlobalBestPSO( n_particles=cli_args.pso_particles, dimensions=4, options=options, - bounds=(min_bounds, max_bounds) + bounds=(min_bounds, max_bounds), + init_pos=population ) _, results = optimizer.optimize( compute_minus_network_snr_pso, @@ -280,6 +314,14 @@ parser.add_argument('--pso-c2', type=float, default=2.0, parser.add_argument('--pso-w', type=float, default=0.01, help='Only relevant for --optimizer pso: ' 'The hyperparameter w: the 
inertia parameter.') +parser.add_argument('--include-candidate-in-optimizer', action='store_true', + help='Include parameters of the candidate event in the ' + 'initialised array for the optimizer. Only relevant for ' + '--optimizer pso or differential_evolution') +parser.add_argument('--seed', type=int, + help='Seed to supply to the random generation of initial ' + 'array to pass to the optimizer. Only relevant for ' + '--optimizer pso or differential_evolution') scheme.insert_processing_option_group(parser) fft.insert_fft_option_group(parser) @@ -292,6 +334,9 @@ if args.optimizer == 'pso' and ps == None: parser.error('You need to install pyswarms to use the pso optimizer.') pycbc.init_logging(args.verbose) +if args.seed: + numpy.random.seed(args.seed) + scheme.verify_processing_options(args, parser) fft.verify_fft_options(args, parser) @@ -361,11 +406,27 @@ bounds = { 'spin2z': (-0.9, 0.9) } +if args.include_candidate_in_optimizer: + # Initial point from found candidate + mchirp_init = cv.mchirp_from_mass1_mass2(fp['mass1'][()], fp['mass2'][()]) + eta_init = cv.eta_from_mass1_mass2(fp['mass1'][()], fp['mass2'][()]) + spin1z_init = fp['spin1z'][()] + spin2z_init = fp['spin2z'][()] + + initial_point = numpy.array([ + mchirp_init, + eta_init, + spin1z_init, + spin2z_init, + ])[numpy.newaxis] +else: + initial_point = None + with scheme_context: logging.info('Starting optimization') optimize_func = optimize_funcs[args.optimizer] - opt_params = optimize_func(bounds, args, extra_args) + opt_params = optimize_func(bounds, args, extra_args, initial_point) logging.info('Optimization complete') From 276f339666254656a4fe4352655ec8428f2b0b6d Mon Sep 17 00:00:00 2001 From: Arthur Tolley <32394213+ArthurTolley@users.noreply.github.com> Date: Tue, 27 Jun 2023 11:38:04 +0100 Subject: [PATCH 11/32] pycbc_optimize_snr: Implement mass dependent bounds on spin parameters (#4176) * Applying spin bounds based upon object mass~ * Updated reference to correct paper * Adding spin bounds based on upper bounds of masses * removing unnecessary additions * Changing max mass2 to the right quantities * Creating maxeta + reusing maxm1 * minor cleanup * fixed min / max eta values * fixed min / max eta * Correcting maxm2 equation * more readable/less redundant comment --------- Co-authored-by: Thomas Dent --- bin/pycbc_optimize_snr | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/bin/pycbc_optimize_snr b/bin/pycbc_optimize_snr index 670d7432de1..f0eea382558 100755 --- a/bin/pycbc_optimize_snr +++ b/bin/pycbc_optimize_snr @@ -33,6 +33,10 @@ MIN_CPT_MASS = 0.99 # Set a large maximum total mass MAX_MTOTAL = 500. +# Set a notional maximum and minimum possible eta +MAX_ETA = 0.24999999999 +MIN_ETA = 0.01 + Nfeval = 0 start_time = time.time() @@ -396,14 +400,24 @@ assert maxchirp < 250. # Establish minimum eta: find the most asymmetric mass point # nb function name is 'mass2' but it doesn't enforce m2 < m1! 
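The mass-dependent spin bounds introduced just below reduce to a simple rule. A sketch, using the same 3 solar mass component boundary as the diff:

    def spin_bounds(max_component_mass):
        # Allow the wider black-hole spin range only when the component can
        # plausibly be a black hole (heavier than 3 solar masses, as below)
        limit = 0.4 if max_component_mass < 3 else 0.9
        return (-limit, limit)

    print(spin_bounds(2.0))    # (-0.4, 0.4): neutron-star-like
    print(spin_bounds(10.0))   # (-0.9, 0.9): black-hole-like
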
 maxm1 = cv._mass2_from_mchirp_mass1(maxchirp, MIN_CPT_MASS)
-mineta = max(cv.eta_from_mass1_mass2(maxm1, MIN_CPT_MASS), 0.01)
+mineta = max(cv.eta_from_mass1_mass2(maxm1, MIN_CPT_MASS), MIN_ETA)
+
+# Use max chirp and max eta to find the upper bound on mass2
+maxm2 = cv.mass2_from_mchirp_eta(maxchirp, MAX_ETA)
+
+# Astrophysical spin bounds dependent on system masses - https://inspirehep.net/literature/1124003
+# If masses can be > 3 then allow them the BH spin range
+minspin1z = -0.4 if maxm1 < 3 else -0.9
+maxspin1z = 0.4 if maxm1 < 3 else 0.9
+minspin2z = -0.4 if maxm2 < 3 else -0.9
+maxspin2z = 0.4 if maxm2 < 3 else 0.9
 
-# Boundaries of the optimization space: dict of (min, max) tuples
+# Boundary of the optimization space
 bounds = {
     'mchirp': (minchirp, maxchirp),
-    'eta': (mineta, 0.24999999999),
-    'spin1z': (-0.9, 0.9),
-    'spin2z': (-0.9, 0.9)
+    'eta': (mineta, MAX_ETA),
+    'spin1z': (minspin1z, maxspin1z),
+    'spin2z': (minspin2z, maxspin2z)
 }

From b1838bf14386836a3e68e1c7a21f1f3485827a4b Mon Sep 17 00:00:00 2001
From: SouradeepPal <126229608+SouradeepPal@users.noreply.github.com>
Date: Tue, 11 Jul 2023 13:43:16 +0530
Subject: [PATCH 12/32] Introduced a new argument --skymap-only-ifos in pycbc_live (#4346)

* Introduced a new argument --skymap-only-ifos in pycbc_live

* Default skymap_only_ifos changed

* skymap_only_ifos as an attribute of LiveEventManager

* Test skymaps with V1 as skymap_only_ifos

* singles with skymap_only_ifos

* Removing commented lines etc

* A default seed for SNR opt

* Poking CI tests

I remove an unnecessary FIXME in the comments, but this is mostly to
restart the CI tests, which seem to have not linked up right.

---------

Co-authored-by: Souradeep Pal
Co-authored-by: Ian Harry
---
 bin/pycbc_live         | 26 +++++++++++++++++---------
 bin/pycbc_optimize_snr |  2 +-
 pycbc/io/live.py       | 14 --------------
 3 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/bin/pycbc_live b/bin/pycbc_live
index 157c27549b3..9703ac662f1 100755
--- a/bin/pycbc_live
+++ b/bin/pycbc_live
@@ -79,6 +79,7 @@ class LiveEventManager(object):
     def __init__(self, args, bank):
         self.low_frequency_cutoff = args.low_frequency_cutoff
         self.bank = bank
+        self.skymap_only_ifos = [] if args.skymap_only_ifos is None else list(set(args.skymap_only_ifos))
 
         # Figure out what we are supposed to process within the pool of MPI processes
         self.comm = mpi.COMM_WORLD
@@ -212,7 +213,7 @@ class LiveEventManager(object):
                     ifo,
                     triggers,
                     pvalue_info,
-                    recalculate_ifar=recalculate_ifar
+                    recalculate_ifar=recalculate_ifar and ifo not in self.skymap_only_ifos
                 )
 
                 # the SNR time series sample rate can vary slightly due to
@@ -416,7 +417,7 @@ class LiveEventManager(object):
         # reload_buffer time. These args are documented here:
         # https://ligo-gracedb.readthedocs.io/en/latest/api.html#ligo.gracedb.rest.GraceDb
         # Because we do not change any of the request session values when running the
-        # code, it should remain thread safe.
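The core of this patch is set bookkeeping: skymap-only detectors never form candidates, but are always added to the follow-up list, as the hunks below show. A standalone sketch of that set arithmetic:

    ifos = {'H1', 'L1', 'V1'}
    skymap_only_ifos = {'V1'}
    coinc_ifos = ['H1', 'L1']        # detectors that formed the candidate

    followup_ifos = set(ifos) - set(coinc_ifos)
    followup_ifos = list(followup_ifos | skymap_only_ifos)
    print(followup_ifos)             # ['V1']: still used for sky localization
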
gdbargs = {'reload_certificate': True, 'reload_buffer': 300} if self.gracedb_server: gdbargs['service_url'] = self.gracedb_server @@ -457,7 +458,8 @@ class LiveEventManager(object): logging.info('computing followup data for coinc') coinc_ifos = coinc_results['foreground/type'].split('-') - followup_ifos = list(set(ifos) - set(coinc_ifos)) + followup_ifos = set(ifos) - set(coinc_ifos) + followup_ifos = list(followup_ifos | set(self.skymap_only_ifos)) double_ifar = coinc_results['foreground/ifar'] if double_ifar < args.ifar_double_followup_threshold: @@ -503,7 +505,7 @@ class LiveEventManager(object): ifar = coinc_results['foreground/ifar'] upload_checks = self.enable_gracedb_upload and self.ifar_upload_threshold < ifar optimize_snr_checks = self.run_snr_optimization and self.ifar_upload_threshold < ifar - + # Keep track of the last few coincs uploaded in order to # prevent singles being uploaded as well for coinc events self.last_few_coincs_uploaded.append(event.merger_time) @@ -543,6 +545,7 @@ class LiveEventManager(object): logging.info(f'Found {ifo} single with ifar {sifar}') followup_ifos = [i for i in active if i is not ifo] + followup_ifos = list(set(followup_ifos) | set(self.skymap_only_ifos)) # Don't recompute ifar considering other ifos sld = self.compute_followup_data( [ifo], @@ -908,6 +911,8 @@ parser.add_argument('--enable-embright-has-massgap', action='store_true', defaul parser.add_argument('--embright-massgap-max', type=float, default=5.0, metavar='SOLAR MASSES', help='Upper limit of the mass gap, used for estimating ' 'HasMassGap probability.') +parser.add_argument('--skymap-only-ifos', nargs='+', + help="Detectors that only contribute in sky localization") scheme.insert_processing_option_group(parser) LiveSingle.insert_args(parser) @@ -952,6 +957,7 @@ if bank.min_f_lower < args.low_frequency_cutoff: logging.info('Analyzing data from detectors %s', ppdets(ifos)) evnt = LiveEventManager(args, bank) +logging.info('Detectors that only aid in the sky localization %s', ppdets(evnt.skymap_only_ifos)) # include MPI rank and functional description into proctitle task_name = 'root' if evnt.rank == 0 else 'filtering' @@ -1029,7 +1035,8 @@ with ctx: # Create double coincident background estimator for every combo if args.enable_background_estimation and evnt.rank == 0: - ifo_combos = itertools.combinations(ifos, 2) + trigg_ifos = [ifo for ifo in ifos if ifo not in evnt.skymap_only_ifos] + ifo_combos = itertools.combinations(trigg_ifos, 2) estimators = [] for combo in ifo_combos: logging.info('Will calculate %s background', ppdets(combo, "-")) @@ -1098,10 +1105,11 @@ with ctx: ) if status is True: - evnt.live_detectors.add(ifo) - if evnt.rank > 0: - logging.info('Filtering %s', ifo) - results[ifo] = mf.process_data(data_reader[ifo]) + if ifo not in evnt.skymap_only_ifos: + evnt.live_detectors.add(ifo) + if evnt.rank > 0: + logging.info('Filtering %s', ifo) + results[ifo] = mf.process_data(data_reader[ifo]) else: logging.info('Insufficient data for %s analysis', ifo) diff --git a/bin/pycbc_optimize_snr b/bin/pycbc_optimize_snr index f0eea382558..ce38ac7f793 100755 --- a/bin/pycbc_optimize_snr +++ b/bin/pycbc_optimize_snr @@ -322,7 +322,7 @@ parser.add_argument('--include-candidate-in-optimizer', action='store_true', help='Include parameters of the candidate event in the ' 'initialised array for the optimizer. 
Only relevant for ' '--optimizer pso or differential_evolution') -parser.add_argument('--seed', type=int, +parser.add_argument('--seed', type=int, default=42, help='Seed to supply to the random generation of initial ' 'array to pass to the optimizer. Only relevant for ' '--optimizer pso or differential_evolution') diff --git a/pycbc/io/live.py b/pycbc/io/live.py index 5e546cfc2a7..754fcbcf563 100644 --- a/pycbc/io/live.py +++ b/pycbc/io/live.py @@ -92,7 +92,6 @@ def __init__(self, coinc_ifos, ifos, coinc_results, **kwargs): snr_ifos = sld.keys() # Ifos with SNR time series calculated self.snr_series = {ifo: sld[ifo]['snr_series'] for ifo in snr_ifos} # Extra ifos have SNR time series but not sngl inspiral triggers - extra_ifos = list(set(snr_ifos) - set(self.et_ifos)) for ifo in snr_ifos: # Ifos used for sky loc must have a PSD @@ -101,14 +100,11 @@ def __init__(self, coinc_ifos, ifos, coinc_results, **kwargs): else: self.snr_series = None snr_ifos = self.et_ifos - extra_ifos = [] # Set up the bare structure of the xml document outdoc = ligolw.Document() outdoc.appendChild(ligolw.LIGO_LW()) - # FIXME is it safe (in terms of downstream operations) to let - # `program_name` default to the actual script name? proc_id = create_process_table(outdoc, program_name='pycbc', detectors=snr_ifos).process_id @@ -191,16 +187,6 @@ def __init__(self, coinc_ifos, ifos, coinc_results, **kwargs): self.et_ifos]) \ + self.time_offset - # For extra detectors used only for sky loc, respect BAYESTAR's - # assumptions and checks - bayestar_check_fields = ('mass1 mass2 mtotal mchirp eta spin1x ' - 'spin1y spin1z spin2x spin2y spin2z').split() - for sngl in sngl_inspiral_table: - if sngl.ifo in extra_ifos: - for bcf in bayestar_check_fields: - setattr(sngl, bcf, getattr(sngl_populated, bcf)) - sngl.end = lal.LIGOTimeGPS(self.merger_time) - outdoc.childNodes[0].appendChild(coinc_event_map_table) outdoc.childNodes[0].appendChild(sngl_inspiral_table) From d344e74648333dee0bf6f85e513ba625fe1472c2 Mon Sep 17 00:00:00 2001 From: xangma Date: Wed, 5 Apr 2023 15:51:10 +0100 Subject: [PATCH 13/32] Fixes for Mac. 
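The portability fix below has two parts: core counting, and replacing numpy.float128, which does not exist on platforms whose long double is 64-bit (such as macOS on Apple Silicon), with the always-defined alias numpy.longdouble. The core-counting part reduces to this pattern:

    import multiprocessing
    import os
    import platform

    def available_cores():
        # os.sched_getaffinity does not exist on macOS, so fall back to a
        # plain CPU count there
        if platform.system() != 'Darwin':
            return len(os.sched_getaffinity(0))
        return multiprocessing.cpu_count()

    print(available_cores())
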
--- bin/pycbc_live | 7 ++++++- pycbc/events/coinc.py | 8 ++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/bin/pycbc_live b/bin/pycbc_live index 9703ac662f1..6b1f01b3697 100755 --- a/bin/pycbc_live +++ b/bin/pycbc_live @@ -15,6 +15,7 @@ import os.path import itertools import platform import subprocess +import multiprocessing from multiprocessing.dummy import threading from matplotlib import use use('agg') @@ -111,7 +112,11 @@ class LiveEventManager(object): if self.run_snr_optimization: # preestimate the number of CPU cores that we can afford giving # to followup processes without slowing down the main search - available_cores = len(os.sched_getaffinity(0)) + # if the machine isn't a mac + if platform.system() != 'Darwin': + available_cores = len(os.sched_getaffinity(0)) + else: + available_cores = multiprocessing.cpu_count() bg_cores = len(tuple(itertools.combinations(ifos, 2))) analysis_cores = 1 + bg_cores self.fu_cores = available_cores - analysis_cores diff --git a/pycbc/events/coinc.py b/pycbc/events/coinc.py index 5ca119ebc20..23eb7da4a5d 100644 --- a/pycbc/events/coinc.py +++ b/pycbc/events/coinc.py @@ -369,8 +369,8 @@ def cluster_coincs(stat, time1, time2, timeslide_id, slide, window, **kwargs): else: time = 0.5 * (time2 + time1) - tslide = timeslide_id.astype(numpy.float128) - time = time.astype(numpy.float128) + tslide = timeslide_id.astype(numpy.longdouble) + time = time.astype(numpy.longdouble) span = (time.max() - time.min()) + window * 10 time = time + span * tslide @@ -424,8 +424,8 @@ def cluster_coincs_multiifo(stat, time_coincs, timeslide_id, slide, window, nifos_minusone = (num_ifos - numpy.ones_like(num_ifos)) time_avg = time_avg + (nifos_minusone * timeslide_id * slide)/num_ifos - tslide = timeslide_id.astype(numpy.float128) - time_avg = time_avg.astype(numpy.float128) + tslide = timeslide_id.astype(numpy.longdouble) + time_avg = time_avg.astype(numpy.longdouble) span = (time_avg.max() - time_avg.min()) + window * 10 time_avg = time_avg + span * tslide From 96f1ac265400e3039b562f8f2314305a85b08864 Mon Sep 17 00:00:00 2001 From: Ian Harry Date: Mon, 15 May 2023 10:58:36 +0100 Subject: [PATCH 14/32] Update pycbc_live --- bin/pycbc_live | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/bin/pycbc_live b/bin/pycbc_live index 6b1f01b3697..95b279cfa5d 100755 --- a/bin/pycbc_live +++ b/bin/pycbc_live @@ -112,19 +112,21 @@ class LiveEventManager(object): if self.run_snr_optimization: # preestimate the number of CPU cores that we can afford giving # to followup processes without slowing down the main search - # if the machine isn't a mac + bg_cores = len(tuple(itertools.combinations(ifos, 2))) + analysis_cores = 1 + bg_cores if platform.system() != 'Darwin': available_cores = len(os.sched_getaffinity(0)) + self.fu_cores = available_cores - analysis_cores + if self.fu_cores <= 0: + logging.warning( + 'Insufficient number of CPU cores (%d) to ' + 'run search and trigger followups. Uploaded ' + 'triggers will momentarily increase the lag', + available_cores + ) + self.fu_cores = 1 else: - available_cores = multiprocessing.cpu_count() - bg_cores = len(tuple(itertools.combinations(ifos, 2))) - analysis_cores = 1 + bg_cores - self.fu_cores = available_cores - analysis_cores - if self.fu_cores <= 0: - logging.warning('Insufficient number of CPU cores (%d) to ' - 'run search and trigger followups. 
Uploaded ' - 'triggers will momentarily increase the lag', - available_cores) + # To enable mac testing, this is just set to 1 self.fu_cores = 1 if args.enable_embright_has_massgap: From fc6f44999da9d559dc793a6ca6a35952e6376813 Mon Sep 17 00:00:00 2001 From: Gareth S Cabourn Davies Date: Thu, 20 Jul 2023 10:39:22 +0100 Subject: [PATCH 15/32] bugfix - allow non-running of singles (#4439) * bugfix - allow non-running of singles * CC * Revrt wrongly-added test chnages --- pycbc/events/single.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pycbc/events/single.py b/pycbc/events/single.py index 5312466fb57..f41b2dfb131 100644 --- a/pycbc/events/single.py +++ b/pycbc/events/single.py @@ -108,6 +108,10 @@ def verify_args(args, parser, ifos): parser.error("All IFOs required in --single-ifar-est-dist " "if IFO-specific options are given.") + if args.sngl_ifar_est_dist[ifo] is None: + # Default - no singles being used + continue + if not args.sngl_ifar_est_dist[ifo] == 'fixed': if not args.single_fit_file: # Fixed IFAR option doesnt need the fits file From 6deb6f379fba39824b00877c61d8181e40e7ba26 Mon Sep 17 00:00:00 2001 From: Gareth S Cabourn Davies Date: Fri, 4 Aug 2023 16:50:34 +0100 Subject: [PATCH 16/32] Live single fits combined bug (#4449) * Take precentile of rate, not count * make change to mean as well * Fix to label * Update bin/live/pycbc_live_plot_combined_single_fits Co-authored-by: Tito Dal Canton * Update bin/live/pycbc_live_combine_single_fits --------- Co-authored-by: Tito Dal Canton --- bin/live/pycbc_live_combine_single_fits | 8 +++++--- bin/live/pycbc_live_plot_combined_single_fits | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/bin/live/pycbc_live_combine_single_fits b/bin/live/pycbc_live_combine_single_fits index 4db23d03d1f..d33d9b3aa44 100644 --- a/bin/live/pycbc_live_combine_single_fits +++ b/bin/live/pycbc_live_combine_single_fits @@ -161,9 +161,11 @@ for ifo in args.ifos: cons_alpha = np.percentile(a, 100 - args.conservative_percentile) cons_alphas_out[ifo][counter] = cons_alpha alphas_out[ifo][counter] = mean_alpha - cons_count = np.percentile(c, args.conservative_percentile) - cons_counts_out[ifo][counter] = cons_count * len(c) - counts_out[ifo][counter] = c.sum() + # To get the count values, we need to convert to rates and back again + r = c / l_times[ad_order] + cons_rate = np.percentile(r, args.conservative_percentile) + cons_counts_out[ifo][counter] = cons_rate * l_times[ad_order].sum() + counts_out[ifo][counter] = np.mean(r) * l_times[ad_order].sum() fout_ifo[f'separate_fits/bin_{counter:d}/fit_coeff'] = a fout_ifo[f'separate_fits/bin_{counter:d}/counts'] = c diff --git a/bin/live/pycbc_live_plot_combined_single_fits b/bin/live/pycbc_live_plot_combined_single_fits index c5db51af499..7c88c6bd490 100644 --- a/bin/live/pycbc_live_plot_combined_single_fits +++ b/bin/live/pycbc_live_plot_combined_single_fits @@ -189,7 +189,7 @@ for ifo in ifos: count_labels = [l.get_label() for l in count_lines] ax_count.legend(count_lines, count_labels, loc='lower center', ncol=5, bbox_to_anchor=(0.5, 1.01)) - ax_count.set_ylabel('Counts per live time') + ax_count.set_ylabel('Rate of triggers above fit threshold [s$^{-1}$]') for ax in [ax_count, ax_alpha]: ax.set_xticks(xtix) From 138bb956ba292effcf3be58076c133238ac4a53d Mon Sep 17 00:00:00 2001 From: Tito Dal Canton Date: Mon, 7 Aug 2023 16:14:10 +0200 Subject: [PATCH 17/32] Live cleanup combine single fits (#4450) * Docstring * Remove unused variables * Simplify loops/ifs * Whitespace * Logging tweaks * 
Unnecessary f-strings * Gareth's suggestion --- bin/live/pycbc_live_combine_single_fits | 62 +++++++++++-------------- 1 file changed, 27 insertions(+), 35 deletions(-) diff --git a/bin/live/pycbc_live_combine_single_fits b/bin/live/pycbc_live_combine_single_fits index d33d9b3aa44..924d6fb2823 100644 --- a/bin/live/pycbc_live_combine_single_fits +++ b/bin/live/pycbc_live_combine_single_fits @@ -12,12 +12,15 @@ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. +"""Combine PyCBC Live single-detector trigger fitting parameters from several +different files.""" + import h5py, numpy as np, argparse import logging import pycbc -parser = argparse.ArgumentParser(usage="", - description="Combine fitting parameters from several different files") + +parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--verbose", action="store_true", help="Print extra debugging information", default=False) parser.add_argument("--trfits-files", nargs="+", required=True, @@ -30,7 +33,7 @@ parser.add_argument("--output", required=True, parser.add_argument("--ifos", required=True, nargs="+", help="list of ifos fo collect info for") -args=parser.parse_args() +args = parser.parse_args() pycbc.init_logging(args.verbose) @@ -42,8 +45,8 @@ if args.conservative_percentile < 50 or \ "otherwise it is either not a percentile, or not " "conservative.") -counts_all = {ifo:[] for ifo in args.ifos} -alphas_all = {ifo:[] for ifo in args.ifos} +counts_all = {ifo: [] for ifo in args.ifos} +alphas_all = {ifo: [] for ifo in args.ifos} analysis_dates = [] with h5py.File(args.trfits_files[0], 'r') as fit_f0: @@ -58,7 +61,7 @@ with h5py.File(args.trfits_files[0], 'r') as fit_f0: fit_thresh = fit_f0.attrs['fit_threshold'] fit_func = fit_f0.attrs['fit_function'] -live_times = {ifo : [] for ifo in args.ifos} +live_times = {ifo: [] for ifo in args.ifos} trigger_file_starts = [] trigger_file_ends = [] @@ -67,7 +70,6 @@ n_files = len(args.trfits_files) logging.info("Checking through %d files", n_files) for f in args.trfits_files: - fits_f = h5py.File(f, 'r') # Check that the file uses the same setup as file 0, to make sure # all coefficients are comparable @@ -84,10 +86,9 @@ for f in args.trfits_files: for ifo in args.ifos: if ifo not in fits_f: continue - else: - trig_times = fits_f[ifo]['triggers']['end_time'][:] - gps_last = max(gps_last, trig_times.max()) - gps_first = min(gps_first, trig_times.min()) + trig_times = fits_f[ifo]['triggers']['end_time'][:] + gps_last = max(gps_last, trig_times.max()) + gps_first = min(gps_first, trig_times.min()) trigger_file_starts.append(gps_first) trigger_file_ends.append(gps_last) @@ -101,7 +102,7 @@ for f in args.trfits_files: counts_all[ifo].append(fits_f[ifo + '/counts'][:]) alphas_all[ifo].append(fits_f[ifo + '/fit_coeff'][:]) if any(np.isnan(fits_f[ifo + '/fit_coeff'][:])): - logging.info("nan in " + f + ", " + ifo) + logging.info("nan in %s, %s", f, ifo) logging.info(fits_f[ifo + '/fit_coeff'][:]) fits_f.close() @@ -118,10 +119,10 @@ ad = trigger_file_ends[ad_order] - start_time_n counts_bin = {ifo: [c for c in zip(*counts_all[ifo])] for ifo in args.ifos} alphas_bin = {ifo: [a for a in zip(*alphas_all[ifo])] for ifo in args.ifos} -alphas_out = {ifo : np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos} -counts_out = {ifo : np.inf * np.ones(len(counts_bin[ifo])) for ifo in args.ifos} -cons_alphas_out = {ifo : np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos} -cons_counts_out = {ifo : np.inf * 
np.ones(len(alphas_bin[ifo])) for ifo in args.ifos} +alphas_out = {ifo: np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos} +counts_out = {ifo: np.inf * np.ones(len(counts_bin[ifo])) for ifo in args.ifos} +cons_alphas_out = {ifo: np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos} +cons_counts_out = {ifo: np.inf * np.ones(len(alphas_bin[ifo])) for ifo in args.ifos} logging.info("Writing results") fout = h5py.File(args.output, 'w') @@ -132,23 +133,14 @@ fout['bins_edges'] = list(bl) + [bu[-1]] fout['fits_dates'] = ad + start_time_n for ifo in args.ifos: - fout.create_group(ifo) - fout[ifo].attrs['live_time'] = sum(live_times[ifo]) - -save_allmeanalpha = {} -for ifo in args.ifos: - fout_ifo = fout[ifo] logging.info(ifo) + fout_ifo = fout.create_group(ifo) l_times = np.array(live_times[ifo]) - count_all = np.sum(counts_bin[ifo], axis=0) / l_times - invalphan = np.array(counts_bin[ifo]) / np.array(alphas_bin[ifo]) - invalphan_all = np.mean(invalphan, axis=0) - alpha_all = np.mean(counts_bin[ifo], axis=0) / invalphan_all - meant = l_times.mean() + fout_ifo.attrs['live_time'] = l_times.sum() - fout_ifo[f'separate_fits/live_times'] = l_times[ad_order] - fout_ifo[f'separate_fits/start_time'] = trigger_file_starts[ad_order] - fout_ifo[f'separate_fits/end_time'] = trigger_file_ends[ad_order] + fout_ifo['separate_fits/live_times'] = l_times[ad_order] + fout_ifo['separate_fits/start_time'] = trigger_file_starts[ad_order] + fout_ifo['separate_fits/end_time'] = trigger_file_ends[ad_order] for counter, a_c_u_l in enumerate(zip(alphas_bin[ifo], counts_bin[ifo], bu, bl)): @@ -181,15 +173,15 @@ for ifo in args.ifos: # Take some averages for plotting and summary values overall_invalphan = counts_out[ifo] / alphas_out[ifo] overall_meanalpha = counts_out[ifo].mean() / overall_invalphan.mean() - sum_counts_out = counts_out[ifo].sum() / sum(live_times[ifo]) - save_allmeanalpha[ifo] = overall_meanalpha # For the fixed version, we just set this to 1 - fout_ifo['fixed/counts'] = [1 for c in counts_out[ifo]] - fout_ifo['fixed/fit_coeff'] = [0 for a in alphas_out[ifo]] + fout_ifo['fixed/counts'] = [1] * len(counts_out[ifo]) + fout_ifo['fixed/fit_coeff'] = [0] * len(alphas_out[ifo]) # Add some useful info to the output file - fout_ifo.attrs['mean_alpha'] = save_allmeanalpha[ifo] + fout_ifo.attrs['mean_alpha'] = overall_meanalpha fout_ifo.attrs['total_counts'] = counts_out[ifo].sum() fout.close() + +logging.info('Done') From a9d90162289d24c99efdf407d25a8d882cf87786 Mon Sep 17 00:00:00 2001 From: xangma Date: Tue, 8 Aug 2023 14:45:59 +0100 Subject: [PATCH 18/32] Threading race condition bugfix for optimize_snr in pycbc_live (#4342) * setup snr_opt before threading * Some minor tweaks, setup optimize_snr even if not performing it * Dont open file unless using it * TDC comments * clarify comments * bug --------- Co-authored-by: GarethCabournDavies --- bin/pycbc_live | 124 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 95 insertions(+), 29 deletions(-) diff --git a/bin/pycbc_live b/bin/pycbc_live index 95b279cfa5d..763d4ef0f09 100755 --- a/bin/pycbc_live +++ b/bin/pycbc_live @@ -109,6 +109,8 @@ class LiveEventManager(object): # Keep track of which events have been uploaded self.last_few_coincs_uploaded = [] + # Give this a nominal value, it is required but not used + self.fu_cores = None if self.run_snr_optimization: # preestimate the number of CPU cores that we can afford giving # to followup processes without slowing down the main search @@ -284,9 +286,9 @@ class LiveEventManager(object): 
triggers[f'foreground/pvalue_{ifo}_saturated'] = pv_sat def setup_optimize_snr( - self, results, live_ifos, triggering_ifos, fname, gid + self, results, live_ifos, triggering_ifos, fname ): - """Setup and start the network SNR optimization process for a + """Setup the network SNR optimization process for a candidate event. See arXiv:2008.07494 for details. Parameters @@ -303,8 +305,6 @@ class LiveEventManager(object): File name where the candidate information has been saved. Used to name other files produced as part of setting up the SNR optimization. - gid : str - GraceDB ID of the candidate. """ template_id = results[f'foreground/{live_ifos[0]}/template_id'] p = props(self.bank.table[template_id]) @@ -360,8 +360,6 @@ class LiveEventManager(object): hdfp['ifar'] = results['foreground/ifar'] if 'p_terr' in results: hdfp['p_terr'] = results['p_terr'] - if gid is not None: - hdfp['gid'] = gid for ifo in args.channel_name: hdfp[f'channel_names/{ifo}'] = args.channel_name[ifo] @@ -391,7 +389,9 @@ class LiveEventManager(object): if self.enable_gracedb_upload: cmd += '--enable-gracedb-upload ' - cmd += '--cores {} '.format(self.fu_cores) + if self.fu_cores: + cmd += '--cores {} '.format(self.fu_cores) + if args.processing_scheme: # we will use the cores for multiple workers of the # optimization routine, so we force the processing scheme @@ -404,6 +404,32 @@ class LiveEventManager(object): opt_scheme = args.processing_scheme.split(':')[0] cmd += '--processing-scheme {}:1 '.format(opt_scheme) + # Save the command which would be used: + snroc_fname = os.path.join(out_dir_path, 'snr_optimize_command.txt') + with open(snroc_fname,'w') as snroc_file: + snroc_file.write(cmd) + + return cmd, out_dir_path + + def run_optimize_snr(self, cmd, out_dir_path, attribute_fname, gid): + """ + Run the optimize_snr instance setup up by setup_optimize_snr + + Parameters + ---------- + cmd: str + Command to tell subprocess what to run for the + optimize_snr process + out_dir_path: os.path or str + Place for storing the SNR optimization results and log + attribute_fname: str + The filename of the attributes of the candidate + gid: str + GraceDB ID of the candidate + """ + if gid is not None: + with h5py.File(attribute_fname, 'a') as hdfp: + hdfp['gid'] = gid log_fname = os.path.join(out_dir_path, 'optimize_snr.log') logging.info('running %s with log to %s', cmd, log_fname) @@ -430,7 +456,7 @@ class LiveEventManager(object): gdbargs['service_url'] = self.gracedb_server self.gracedb = GraceDb(**gdbargs) - def upload_in_thread(self, event, fname, comment, results, live_ifos, ifos, + def upload_in_thread(self, event, fname, comment, cmd, out_dir_path, upload_checks, optimize_snr_checks): gid = None if upload_checks: @@ -442,13 +468,14 @@ class LiveEventManager(object): search=self.gracedb_search, labels=self.gracedb_labels ) + if optimize_snr_checks: - self.setup_optimize_snr( - results, - live_ifos, - ifos, - fname, - gid + logging.info('Optimizing SNR for event above threshold ..') + self.run_optimize_snr( + cmd, + out_dir_path, + fname.replace('.xml.gz', '_attributes.hdf'), + gid ) def check_coincs(self, ifos, coinc_results, psds): @@ -520,17 +547,33 @@ class LiveEventManager(object): self.last_few_coincs_uploaded = \ self.last_few_coincs_uploaded[-10:] + # Save the event if not upload_checks: event.save(fname) + + # Save the event info/data and what _would_ be run for SNR optimizer, + # even if not running it - do this before the thread so no + # data buffers move on in a possible interim period + + # Which IFOs 
were active? + live_ifos = [ifo for ifo in sld if 'snr_series' in sld[ifo]] + + # Tell SNR optimized event about p_terr + if hasattr(event, 'p_terr') and event.p_terr is not None: + coinc_results['p_terr'] = event.p_terr + + # Do the optimizer setup + cmd, out_dir_path = self.setup_optimize_snr( + coinc_results, + live_ifos, + coinc_ifos, + fname, + ) + + # Now start the thread to run the SNR optimizer if upload_checks or optimize_snr_checks: - if optimize_snr_checks: - logging.info('Optimizing SNR for coinc above threshold ..') - # Tell snr optimized event about p_terr - if hasattr(event, 'p_terr') and event.p_terr is not None: - coinc_results['p_terr'] = event.p_terr - live_ifos = [ifo for ifo in sld if 'snr_series' in sld[ifo]] thread_args = ( - event, fname, comment, coinc_results, live_ifos, coinc_ifos, + event, fname, comment, cmd, out_dir_path, upload_checks, optimize_snr_checks ) gdb_upload_thread = threading.Thread(target=self.upload_in_thread, @@ -590,10 +633,12 @@ class LiveEventManager(object): comment = comment.format(ifo, ppdets(followup_ifos)) # Has a coinc event at this time been uploaded recently? - # If so, skip upload + # If so, skip upload - Note that this means that we _always_ + # prefer an uploaded coincident event over a single coinc_tdiffs = abs(event.merger_time - numpy.array(self.last_few_coincs_uploaded)) nearby_coincs = coinc_tdiffs < 0.1 + if any(nearby_coincs): logging.info( "Single-detector candidate at time %.3f was already " @@ -608,17 +653,38 @@ class LiveEventManager(object): self.ifar_upload_threshold < single['foreground/ifar'] and \ not any(nearby_coincs) and \ self.run_snr_optimization + + # Save the event if not upload_checks: event.save(fname) + + # Save the event info/data and what _would_ be run for SNR optimizer, + # even if not running it - do this before the thread so no + # data buffers move on in a possible interim period + + if any(nearby_coincs): + # Don't even save the snr optimization stuff for singles + # where there is already a coinc + continue + + # Which IFOs were active? + live_ifos = [ifo for ifo in sld if 'snr_series' in sld[ifo]] + + # Tell SNR optimized event about p_terr + if hasattr(event, 'p_terr') and event.p_terr is not None: + single['p_terr'] = event.p_terr + + # Do the optimizer setup + cmd, out_dir_path = self.setup_optimize_snr( + single, + live_ifos, + [ifo], + fname, + ) + if upload_checks or optimize_snr_checks: - if optimize_snr_checks: - logging.info('Optimizing SNR for single above threshold ..') - # Tell snr optimized event about p_terr - if hasattr(event, 'p_terr') and event.p_terr is not None: - single['p_terr'] = event.p_terr - live_ifos = [ifo for ifo in sld if 'snr_series' in sld[ifo]] thread_args = ( - event, fname, comment, single, live_ifos, [ifo], + event, fname, comment, cmd, out_dir_path, upload_checks, optimize_snr_checks ) gdb_upload_thread = threading.Thread(target=self.upload_in_thread, From 7be12f1ee83e00ca58d724b29acd249bb5d9a3e9 Mon Sep 17 00:00:00 2001 From: Tito Dal Canton Date: Tue, 21 Nov 2023 02:21:50 -0800 Subject: [PATCH 19/32] Revert "Allow SNR optimizer to use candidate point in initial array (#4393)" This reverts commit 460d2a40c4bdaf1446fd541f9e7666b24c625d35. The revert is to avoid a bug in the SNR optimizer introduced by this commit and later fixed properly in #4561. 
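For context, the reverted change drew the optimizer's starting
population uniformly on the unit cube and rescaled the candidate's
parameters into that cube with a small helper before appending the
point to the population passed via init=. A standalone sketch of that
rescaling (the bounds values here are illustrative):

    import numpy

    def normalize_initial_point(initial_point, bounds):
        # Map a physical-space point onto the per-dimension unit
        # interval, as the reverted helper did.
        return (initial_point - bounds[:, 0]) / (bounds[:, 1] - bounds[:, 0])

    # Illustrative (min, max) rows for mchirp, eta, spin1z, spin2z
    bounds = numpy.array([[20.0, 40.0],
                          [0.1, 0.25],
                          [-0.9, 0.9],
                          [-0.9, 0.9]])
    candidate = numpy.array([[30.0, 0.2, 0.1, 0.0]])
    print(normalize_initial_point(candidate, bounds))

Note that scipy.optimize.differential_evolution expects an init=
population given in physical parameter space (SciPy rescales it to its
internal unit representation and clips it to the bounds itself), so
handing it unit-cube samples distorts the starting population — which
is consistent with the bug this revert works around.
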
--- bin/pycbc_optimize_snr | 85 ++++++------------------------------------ 1 file changed, 12 insertions(+), 73 deletions(-) diff --git a/bin/pycbc_optimize_snr b/bin/pycbc_optimize_snr index ce38ac7f793..f6c51c7b135 100755 --- a/bin/pycbc_optimize_snr +++ b/bin/pycbc_optimize_snr @@ -130,28 +130,13 @@ def compute_minus_network_snr_pso(v, *argv, **kwargs): return -nsnr_array -def normalize_initial_point(initial_point, bounds): - return (initial_point - bounds[:,0]) / (bounds[:,1] - bounds[:,0]) - -def optimize_di(bounds, cli_args, extra_args, initial_point): - bounds = numpy.array([ +def optimize_di(bounds, cli_args, extra_args): + bounds = [ bounds['mchirp'], bounds['eta'], bounds['spin1z'], bounds['spin2z'] - ]) - - - # Currently only implemented for random seed initial array - rng = numpy.random.mtrand._rand - population_shape=(cli_args.di_popsize, 4) - population = rng.uniform(size=population_shape) - if cli_args.include_candidate_in_optimizer: - # Re-normalize the initial point into the correct range - point_init = normalize_initial_point(initial_point, bounds) - # add the initial point to the population - population = numpy.concatenate((population[:-1], point_init)) - + ] results = differential_evolution( compute_minus_network_snr, bounds, @@ -161,13 +146,12 @@ def optimize_di(bounds, cli_args, extra_args, initial_point): mutation=(0.5, 1), recombination=0.7, callback=callback_func, - args=extra_args, - init=population + args=extra_args ) return results.x -def optimize_shgo(bounds, cli_args, extra_args, initial_point): # pylint: disable=unused-argument +def optimize_shgo(bounds, cli_args, extra_args): bounds = [ bounds['mchirp'], bounds['eta'], @@ -184,48 +168,30 @@ def optimize_shgo(bounds, cli_args, extra_args, initial_point): # pylint: disabl ) return results.x -def normalize_population(population, min_bounds, max_bounds): - norm_pop = min_bounds + population * (max_bounds - min_bounds) - - return norm_pop -def optimize_pso(bounds, cli_args, extra_args, initial_point): +def optimize_pso(bounds, cli_args, extra_args): options = { 'c1': cli_args.pso_c1, 'c2': cli_args.pso_c2, 'w': cli_args.pso_w } - min_bounds = numpy.array([ + min_bounds = [ bounds['mchirp'][0], bounds['eta'][0], bounds['spin1z'][0], bounds['spin2z'][0] - ]) - max_bounds = numpy.array([ + ] + max_bounds = [ bounds['mchirp'][1], bounds['eta'][1], bounds['spin1z'][1], bounds['spin2z'][1] - ]) - - # Manually generate the initial points, this is the same as the default - # method, but allows us to make some modifications - population = numpy.random.uniform( - low=0.0, high=1.0, size=(cli_args.pso_particles, 4) - ) - population = normalize_population(population, min_bounds, max_bounds) - - if cli_args.include_candidate_in_optimizer: - # add the initial point to the population - population = numpy.concatenate((population[:-1], - initial_point)) - + ] optimizer = ps.single.GlobalBestPSO( n_particles=cli_args.pso_particles, dimensions=4, options=options, - bounds=(min_bounds, max_bounds), - init_pos=population + bounds=(min_bounds, max_bounds) ) _, results = optimizer.optimize( compute_minus_network_snr_pso, @@ -318,14 +284,6 @@ parser.add_argument('--pso-c2', type=float, default=2.0, parser.add_argument('--pso-w', type=float, default=0.01, help='Only relevant for --optimizer pso: ' 'The hyperparameter w: the inertia parameter.') -parser.add_argument('--include-candidate-in-optimizer', action='store_true', - help='Include parameters of the candidate event in the ' - 'initialised array for the optimizer. 
Only relevant for ' - '--optimizer pso or differential_evolution') -parser.add_argument('--seed', type=int, default=42, - help='Seed to supply to the random generation of initial ' - 'array to pass to the optimizer. Only relevant for ' - '--optimizer pso or differential_evolution') scheme.insert_processing_option_group(parser) fft.insert_fft_option_group(parser) @@ -338,9 +296,6 @@ if args.optimizer == 'pso' and ps == None: parser.error('You need to install pyswarms to use the pso optimizer.') pycbc.init_logging(args.verbose) -if args.seed: - numpy.random.seed(args.seed) - scheme.verify_processing_options(args, parser) fft.verify_fft_options(args, parser) @@ -420,27 +375,11 @@ bounds = { 'spin2z': (minspin2z, maxspin2z) } -if args.include_candidate_in_optimizer: - # Initial point from found candidate - mchirp_init = cv.mchirp_from_mass1_mass2(fp['mass1'][()], fp['mass2'][()]) - eta_init = cv.eta_from_mass1_mass2(fp['mass1'][()], fp['mass2'][()]) - spin1z_init = fp['spin1z'][()] - spin2z_init = fp['spin2z'][()] - - initial_point = numpy.array([ - mchirp_init, - eta_init, - spin1z_init, - spin2z_init, - ])[numpy.newaxis] -else: - initial_point = None - with scheme_context: logging.info('Starting optimization') optimize_func = optimize_funcs[args.optimizer] - opt_params = optimize_func(bounds, args, extra_args, initial_point) + opt_params = optimize_func(bounds, args, extra_args) logging.info('Optimization complete') From 991762d24649e7347bb32e602ac7fe20063a25eb Mon Sep 17 00:00:00 2001 From: Tito Dal Canton Date: Tue, 16 Jan 2024 15:24:30 +0100 Subject: [PATCH 20/32] Set version to 2.1.3 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 0e18895211c..0d68fb625b7 100755 --- a/setup.py +++ b/setup.py @@ -122,7 +122,7 @@ def __getattr__(self, attr): vinfo = _version_helper.generate_git_version_info() except: vinfo = vdummy() - vinfo.version = '2.1.2' + vinfo.version = '2.1.3' vinfo.release = 'True' with open('pycbc/version.py', 'wb') as f: From 7ebf243e55bd30a84e7267679ec67f061e791482 Mon Sep 17 00:00:00 2001 From: Gareth S Cabourn Davies Date: Wed, 6 Dec 2023 13:10:34 +0000 Subject: [PATCH 21/32] update lalsimulation cvmfs path (#4580) * update lalsimulation cvmfs path * missed that the mount location needs to be changed as well * more references to previsou CVMFS location --- Dockerfile | 4 ++-- docker/etc/docker-install.sh | 6 +++--- docs/building_bundled_executables.rst | 4 ++-- docs/install.rst | 4 ++-- docs/install_lalsuite.rst | 2 +- docs/workflow/pycbc_make_coinc_search_workflow.rst | 4 ++-- tools/cvmfs-default.local | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7002c97072a..97997bc5ab9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,7 +15,7 @@ RUN dnf -y install https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/cvmfs-release- # set up environment RUN cd / && \ - mkdir -p /cvmfs/config-osg.opensciencegrid.org /cvmfs/oasis.opensciencegrid.org /cvmfs/gwosc.osgstorage.org && echo "config-osg.opensciencegrid.org /cvmfs/config-osg.opensciencegrid.org cvmfs ro,noauto 0 0" >> /etc/fstab && echo "oasis.opensciencegrid.org /cvmfs/oasis.opensciencegrid.org cvmfs ro,noauto 0 0" >> /etc/fstab && echo "gwosc.osgstorage.org /cvmfs/gwosc.osgstorage.org cvmfs ro,noauto 0 0" >> /etc/fstab && mkdir -p /oasis /scratch /projects /usr/lib64/slurm /var/run/munge && \ + mkdir -p /cvmfs/config-osg.opensciencegrid.org /cvmfs/software.igwn.org /cvmfs/gwosc.osgstorage.org && echo 
"config-osg.opensciencegrid.org /cvmfs/config-osg.opensciencegrid.org cvmfs ro,noauto 0 0" >> /etc/fstab && echo "software.igwn.org /cvmfs/software.igwn.org cvmfs ro,noauto 0 0" >> /etc/fstab && echo "gwosc.osgstorage.org /cvmfs/gwosc.osgstorage.org cvmfs ro,noauto 0 0" >> /etc/fstab && mkdir -p /oasis /scratch /projects /usr/lib64/slurm /var/run/munge && \ groupadd -g 1000 pycbc && useradd -u 1000 -g 1000 -d /opt/pycbc -k /etc/skel -m -s /bin/bash pycbc # Install MPI software needed for pycbc_inference @@ -37,7 +37,7 @@ ENV PATH "/usr/local/bin:/usr/bin:/bin:/lib64/openmpi/bin/bin" # Set the default LAL_DATA_PATH to point at CVMFS first, then the container. # Users wanting it to point elsewhere should start docker using: # docker -e LAL_DATA_PATH="/my/new/path" -ENV LAL_DATA_PATH "/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation:/opt/pycbc/pycbc-software/share/lal-data" +ENV LAL_DATA_PATH "/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation:/opt/pycbc/pycbc-software/share/lal-data" # When the container is started with # docker run -it pycbc/pycbc-el8:latest diff --git a/docker/etc/docker-install.sh b/docker/etc/docker-install.sh index 68d31814e96..72246631395 100755 --- a/docker/etc/docker-install.sh +++ b/docker/etc/docker-install.sh @@ -11,11 +11,11 @@ mkdir -p /opt/pycbc/src cp -a /scratch /opt/pycbc/src/pycbc chmod a+rw /dev/fuse mount /cvmfs/config-osg.opensciencegrid.org -mount /cvmfs/oasis.opensciencegrid.org +mount /cvmfs/software.igwn.org mkdir -p /opt/pycbc/pycbc-software/share/lal-data -rsync --exclude='SEOBNRv1ROM*' --exclude='SEOBNRv2ROM_DS_HI_v1.0.hdf5' --exclude='NRSur4d2s_FDROM.hdf5' -ravz /cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation/ /opt/pycbc/pycbc-software/share/lal-data/ +rsync --exclude='SEOBNRv1ROM*' --exclude='SEOBNRv2ROM_DS_HI_v1.0.hdf5' --exclude='NRSur4d2s_FDROM.hdf5' -ravz /cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation/ /opt/pycbc/pycbc-software/share/lal-data/ umount /cvmfs/config-osg.opensciencegrid.org -umount /cvmfs/oasis.opensciencegrid.org +umount /cvmfs/software.igwn.org chown -R 1000:1000 /opt/pycbc/src /opt/pycbc/pycbc-software chmod -R u=rwX,g=rX,o=rX /opt/pycbc exit 0 diff --git a/docs/building_bundled_executables.rst b/docs/building_bundled_executables.rst index d8d014047c6..612711b4eed 100644 --- a/docs/building_bundled_executables.rst +++ b/docs/building_bundled_executables.rst @@ -96,7 +96,7 @@ The script executes ``pycbc_inspiral`` as part of the build process. This may require LAL data at build time. The LAL data can be given with the command line argument:: - --with-lal-data-path=/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation + --with-lal-data-path=/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation The default command line arguments clone PyCBC from the standard GitHub repository. 
If you would like to build a bundle using code from your own @@ -130,6 +130,6 @@ Building Releases for CVMFS To build a release of ``pycbc_inspiral`` for installation in CVMFS, run the script with the arguments:: - pycbc_build_eah.sh --lalsuite-commit=a3a5a476d33f169b8749e2840c306a48df63c936 --pycbc-commit=b68832784969a47fe2658abffb3888ee06cd1be4 --with-extra-libs=file:///home/pycbc/build/composer_xe_2015.0.090.tar.gz --with-lal-data-path=/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation + pycbc_build_eah.sh --lalsuite-commit=a3a5a476d33f169b8749e2840c306a48df63c936 --pycbc-commit=b68832784969a47fe2658abffb3888ee06cd1be4 --with-extra-libs=file:///home/pycbc/build/composer_xe_2015.0.090.tar.gz --with-lal-data-path=/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation changing the ``--lalsuite-commit``, ``--pycbc-commit``, and ``--with-lal-data-path`` options to the values for the release. diff --git a/docs/install.rst b/docs/install.rst index f5b85aee832..9f9c308be74 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -70,12 +70,12 @@ available in a 'IGWN Conda' environment. To see what environments are available This should yield ``igwn-py37`` as one choice. The output of this command will also tell you the location of the environment in the file system. Then, the location of the -python3.7 executable is for instance ``/cvmfs/oasis.opensciencegrid.org/ligo/sw/conda/envs/igwn-py37/bin/python`` +python3.7 executable is for instance ``/cvmfs/software.igwn.org/conda/envs/igwn-py37/bin/python`` and you will create the virtualenv via the command .. code-block:: bash - virtualenv -p /cvmfs/oasis.opensciencegrid.org/ligo/sw/conda/envs/igwn-py37/bin/python env + virtualenv -p /cvmfs/software.igwn.org/conda/envs/igwn-py37/bin/python env Once the virtualenv has been created you can install PyCBC from PyPI or a local copy with the `[igwn]` extra specifier to install the optional extra requirements diff --git a/docs/install_lalsuite.rst b/docs/install_lalsuite.rst index 438b3b358bb..52d7e340925 100644 --- a/docs/install_lalsuite.rst +++ b/docs/install_lalsuite.rst @@ -146,7 +146,7 @@ run the command .. code-block:: bash - echo 'export LAL_DATA_PATH=/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation' >> $VIRTUAL_ENV/bin/activate + echo 'export LAL_DATA_PATH=/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation' >> $VIRTUAL_ENV/bin/activate to add the appropriate path to your virtual environment's ``activate`` script. Then deactivate and activate your virtual environment. diff --git a/docs/workflow/pycbc_make_coinc_search_workflow.rst b/docs/workflow/pycbc_make_coinc_search_workflow.rst index 93f8f8ad9ac..d83271389df 100644 --- a/docs/workflow/pycbc_make_coinc_search_workflow.rst +++ b/docs/workflow/pycbc_make_coinc_search_workflow.rst @@ -1004,8 +1004,8 @@ locations in CVMFS. You will also need to specify where the code should get the data needed to generate reduced order model waveforms. 
To do this add the following additional arguments to ``pycbc_submit_dax``:: - --append-site-profile 'local:env|LAL_DATA_PATH:/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation' \ - --append-site-profile 'osg:env|LAL_DATA_PATH:/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation' \ + --append-site-profile 'local:env|LAL_DATA_PATH:/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation' \ + --append-site-profile 'osg:env|LAL_DATA_PATH:/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation' \ Here, ``current`` is a symbolic link to the latest version of the data and can be replaced with a specific release (e.g. ``e02dab8c``) if required. diff --git a/tools/cvmfs-default.local b/tools/cvmfs-default.local index 6b11c123d76..04bc26c1e55 100644 --- a/tools/cvmfs-default.local +++ b/tools/cvmfs-default.local @@ -1,5 +1,5 @@ CVMFS_USER=cvmfs -CVMFS_REPOSITORIES=config-osg.opensciencegrid.org,oasis.opensciencegrid.org,gwosc.osgstorage.org +CVMFS_REPOSITORIES=config-osg.opensciencegrid.org,software.igwn.org,gwosc.osgstorage.org CVMFS_QUOTA_LIMIT=5000 CVMFS_HTTP_PROXY=DIRECT CVMFS_MAX_RETRIES=10 From 8288a191be021faa0e3c6eeb966618668e8f0b64 Mon Sep 17 00:00:00 2001 From: Ian Harry Date: Mon, 8 Jan 2024 12:16:05 -0800 Subject: [PATCH 22/32] Revert "add coordinates_space.py (#4289)" This reverts commit e3418c70d8da796fb8b4932fb7f0b22d82621bf0. --- tox.ini | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index 4c8f8f91acc..67aa9f7fda2 100644 --- a/tox.ini +++ b/tox.ini @@ -57,8 +57,9 @@ deps = git+https://github.com/ConWea/BBHX-waveform-model.git; sys_platform == 'linux' conda_deps= mysqlclient - gcc_linux-64 - gxx_linux-64 + gcc_linux-64>=12.2.0 + gxx_linux-64>=12.2.0 + binutils_linux-64>=2.39 gsl lapack==3.6.1 conda_channels=conda-forge From 98cf320d4045bb45667fd7aac7edee9de6871745 Mon Sep 17 00:00:00 2001 From: Ian Harry Date: Mon, 8 Jan 2024 12:21:19 -0800 Subject: [PATCH 23/32] REmoving lisa examples --- docs/inference.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/inference.rst b/docs/inference.rst index 7ae2b8a7c8d..980da5f079c 100644 --- a/docs/inference.rst +++ b/docs/inference.rst @@ -513,7 +513,8 @@ Examples inference/examples/single.rst inference/examples/relative.rst inference/examples/hierarchical.rst - /inference/examples/lisa_smbhb.rst + inference/examples/sampler_platter.rst + inference/models.rst =============================================== Visualizing the Posteriors From 05c440623cc38aae3e8603d57a27a060e2d7e592 Mon Sep 17 00:00:00 2001 From: Ian Harry Date: Mon, 8 Jan 2024 12:21:48 -0800 Subject: [PATCH 24/32] REmove inference examples --- docs/inference/examples/lisa_smbhb.rst | 68 -------------------------- 1 file changed, 68 deletions(-) delete mode 100644 docs/inference/examples/lisa_smbhb.rst diff --git a/docs/inference/examples/lisa_smbhb.rst b/docs/inference/examples/lisa_smbhb.rst deleted file mode 100644 index 2ceffbd5e71..00000000000 --- a/docs/inference/examples/lisa_smbhb.rst +++ /dev/null @@ -1,68 +0,0 @@ ----------------------------------------------- -LISA parameter estimation for simulated SMBHB ----------------------------------------------- - -This example shows how to use PyCBC for parameter estimation of supermassive black hole binaries (SMBHB) -in LISA mock data. 
The `data `_ are generated from -`LISA Data Challenge 2a: Sangria `_, -and `BBHx `_ package is used to generate the ``IMRPhenomD`` template and calculate -the corresponding TDI response for LISA. Relative binning (heterodyned likelihood) -is used during sampling to speed up the computation of likelihood functions. Before doing parameter estimation, -you need to install `BBHx `_ and `the corresponding PyCBC waveform plugin `_, -please click the corresponding link to see the detailed description of the installation. - -First, we create the following configuration file, here we just set chirp mass, mass ratio and tc as variable parameters, -`tc`, `eclipticlongitude`, `eclipticlatitude` and `polarization` are defined in the LISA frame: - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/lisa_smbhb_relbin.ini - :language: ini - -:download:`Download <../../../examples/inference/lisa_smbhb/lisa_smbhb_relbin.ini>` - -By setting the model name to ``relative`` we are using -:py:class:`Relative ` model. - -In this simple example, we do the parameter estimation for the first SMBHB signal in the LDC Sangria dataset -(you can also run parameter estimation for other SMBHB signals by choosing appropriate prior range), -we need download the data first (`MBHB_params_v2_LISA_frame.pkl` contains all the true parameters): - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/get.sh - :language: bash - -:download:`Download <../../../examples/inference/lisa_smbhb/get.sh>` - -Now run: - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/run.sh - :language: bash - -:download:`Download <../../../examples/inference/lisa_smbhb/run.sh>` - -This will run the ``dynesty`` sampler. When it is done, you will have a file called -``lisa_smbhb.hdf`` which contains the results. It should take about three minutes to -run. - -To plot the posterior distribution after the last iteration, you can run the following simplified script: - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/plot.sh - :language: bash - -:download:`Download <../../../examples/inference/lisa_smbhb/plot.sh>` - -Or you can run the advanced one: - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/advanced_plot.py - :language: python - -:download:`Download <../../../examples/inference/lisa_smbhb/advanced_plot.py>` - -You can modify this advanced plot script to generate the posterior of any SMBHB signals in the LDC Sangria dataset. -In this example it will create the following plot: - -.. image:: ../../_include/lisa_smbhb_mass_tc_0.png - :scale: 60 - :align: center - -The scatter points show each walker's position after the last iteration. The -points are colored by the SNR at that point, with the 50th and 90th -percentile contours drawn. The red lines represent the true parameters of injected signal. 
From 2ee05aaf4d6d70c39e934cd55f621243a344b767 Mon Sep 17 00:00:00 2001 From: Ian Harry Date: Mon, 8 Jan 2024 12:52:43 -0800 Subject: [PATCH 25/32] Remove LISA deps --- tox.ini | 3 --- 1 file changed, 3 deletions(-) diff --git a/tox.ini b/tox.ini index 67aa9f7fda2..724a2bf49a1 100644 --- a/tox.ini +++ b/tox.ini @@ -52,9 +52,6 @@ commands = bash tools/pycbc_test_suite.sh [testenv:py-docs] deps = {[base]deps} - ; Needed for `BBHx` package to work with PyCBC - git+https://github.com/mikekatz04/BBHx.git; sys_platform == 'linux' - git+https://github.com/ConWea/BBHX-waveform-model.git; sys_platform == 'linux' conda_deps= mysqlclient gcc_linux-64>=12.2.0 From b33d7b77fffee045be367473a5957803b86470b9 Mon Sep 17 00:00:00 2001 From: Ian Harry Date: Tue, 9 Jan 2024 01:10:09 -0800 Subject: [PATCH 26/32] Removing more LISA things --- docs/inference/models.rst | 212 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 212 insertions(+) create mode 100644 docs/inference/models.rst diff --git a/docs/inference/models.rst b/docs/inference/models.rst new file mode 100644 index 00000000000..a3aa15746d2 --- /dev/null +++ b/docs/inference/models.rst @@ -0,0 +1,212 @@ +.. _models_detailed: + +--------------------------------------------- +Details of common Models in PyCBC Inference +--------------------------------------------- + +Commonly used likelihood models are compared below. PyCBC Inference +also has the capability to interface with external models. See the +`interactive tutorial `_ if you'd like to learn how to add you own +model for handling a new problem such as GRBs, Kilonova or whatever you can +imagine. + +============================================== +Standard models with full waveform generation +============================================== + +.. card:: Gaussian Noise + + ``'gaussian_noise'`` :py:class:`pycbc.inference.models.gaussian_noise.GaussianNoise` + + The `gaussian_noise` model is one of the most generic models in + PyCBC Inference. It is designed for gravitational-wave data that is + assumed to be Gaussian and stationary. Otherwise, there are no + restricts on the types of waveform models that may be used. + Waveform models supported by either `get_fd_waveform` or + `get_td_waveform` can be used as waveform models. Use this model only if + none other fits and is more optimized for your specific problem. Because + this waveform model is generic is often slower than the other more + specialized models. + + Supported Marginalizations: None + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:✅ + :ref:`Example ` + +.. card:: Marginalized Phase + + ``'marginalized_phase'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedPhaseGaussianNoise` + + + The `marginalized_phase` model is a straightforward extension to + the `gaussian_noise` model that marginalizes the signal over an overall + phase. This can account for the orbital phase of a gravitational-wave + signal which is usually a nuissance parameter. However, this + implementation is only correct for signals that only include the + dominant gravitational-wave mode (or 22 mode). This is because each + mode has a different functional dependence on the orbital phase. + + + Supported Marginalizations: coa_phase (automatic, dominant-mode) + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:❌ + +.. 
card:: Marginalized Polarization
+
+    ``'marginalized_polarization'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedPolarization`
+
+
+    The `marginalized_polarization` model is a straightforward extension to
+    the `gaussian_noise` model that marginalizes the signal over
+    the polarization angle of the gravitational-wave signal. This is
+    done by filtering both the plus and cross polarizations separately
+    and then numerically integrating the resulting inner products over
+    a grid of polarization angle values. The density of the grid
+    is selectable. Additional marginalizations can also be optionally enabled.
+    This model is often used in preference to the `marginalized_phase` model
+    because it can support waveform models with higher order modes.
+
+    Supported Marginalizations: polarization (automatic), coa_phase (dominant-mode), distance
+    +++
+    Earth Rotation:❌ LISA:❌ Higher Modes:✅
+
+.. card:: Marginalized Time
+
+    ``'marginalized_time'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedTime`
+
+
+    The `marginalized_time` model calculates a time series of inner products
+    between the signal model and data so that the likelihood can be
+    numerically marginalized over. The marginalization uses a weighted
+    Monte Carlo that samples the most likely regions of the time series
+    more densely. The time series is also sub-sample interpolated. Higher
+    mode waveforms are also permitted as both the plus and cross polarizations
+    are explicitly handled separately. The time marginalization can also be
+    done in concert with sky and various intrinsic marginalizations.
+
+    Supported Marginalizations: tc (time), distance, coa_phase (dominant mode), polarization, ra dec
+    +++
+    Earth Rotation:❌ LISA:❌ Higher Modes:✅
+    :ref:`Example `
+
+.. card:: Marginalized Higher Mode Phase
+
+    ``'marginalized_hmpolphase'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedHMPolPhase`
+
+
+    The `marginalized_hmpolphase` model numerically marginalizes the likelihood
+    over a grid of both polarization values and orbital phase values. It
+    explicitly handles this for higher-order-mode waveforms by calculating
+    all inner products between the data and signal on a mode-by-mode basis.
+    The likelihoods are then assembled for each polarization and phase value
+    and numerically integrated over. This model can only be used for
+    waveform approximants which support the PyCBC higher mode interface,
+    as we need to be able to calculate each mode separately.
+
+    Supported Marginalizations: polarization (automatic), coa_phase (automatic)
+    +++
+    Earth Rotation:❌ LISA:❌ Higher Modes:✅
+
+=========================================
+Heterodyne / Relative Models
+=========================================
+
+.. card:: Relative Model
+
+    ``'relative'`` :py:class:`pycbc.inference.models.relbin.Relative`
+
+    The `relative` model uses a reference signal provided in its
+    configuration to expand the likelihood in terms of differences between
+    the reference signal and a target signal. If the reference is close to the
+    peak in the likelihood, the reasonable portions of parameter space to
+    explore will only have small phase deviations from the reference signal
+    (i.e. several cycles). This allows us to represent the ratio between
+    the target signal and our reference using a piece-wise linear approximation.
+    The target signal only needs to be calculated at the edges of this approximation.
+    This model thus only supports waveforms which can efficiently generate
+    a signal only at a given set of frequency values. Higher order modes
+    are not recommended due to their possible violation of the approximation
+    that the ratio between the target and reference signals is a slowly varying
+    smooth function. In that case, use the `multi_signal` model and
+    use a `relative` model for each mode. Where the model approximations hold,
+    use this in preference
+    to the models that need to generate a full waveform for every likelihood,
+    as these will usually be much faster.
+
+    Supported Marginalizations: distance, coa_phase (dominant mode), polarization
+    +++
+    Earth Rotation:✅ LISA:✅ Higher Modes:❌
+    :ref:`Example `
+
+.. card:: Relative Time
+
+    ``'relative_time'`` :py:class:`pycbc.inference.models.relbin.RelativeTime`
+
+    The `relative_time` model extends the `relative` model by calculating
+    the likelihood for a grid of possible merger times. A weighted Monte Carlo
+    is performed to integrate over the merger time. Sub-sample interpolation
+    of the time series is performed. Sky location marginalization, among others,
+    can be done in concert.
+
+    Supported Marginalizations: distance, coa_phase (dominant mode), polarization, ra dec
+    +++
+    Earth Rotation:❌ LISA:❌ Higher Modes:❌
+
+.. card:: Relative Time Dominant-mode Only
+
+    ``'relative_time_dom'`` :py:class:`pycbc.inference.models.relbin.RelativeTimeDom`
+
+    The `relative_time_dom` model further specializes the model to completely
+    preclude use with higher order mode signals. For this restriction, it
+    gains the ability to marginalize over inclination.
+
+    Supported Marginalizations: distance, coa_phase (dominant mode), polarization, inclination, ra dec
+    +++
+    Earth Rotation:❌ LISA:❌ Higher Modes:❌
+
+
+=========================================
+Extrinsic Parameter Only Models
+=========================================
+
+.. card:: Single Template
+
+    ``'single_template'`` :py:class:`pycbc.inference.models.single_template.SingleTemplate`
+
+    The `single_template` model is only for extrinsic parameter estimation, e.g.
+    sky location, distance, inclination, etc. It speeds up parameter estimation
+    by completely avoiding waveform generation while calculating likelihoods.
+    A single reference signal is generated with fixed intrinsic (masses, spins, etc.)
+    parameters. The likelihoods can then be precalculated up to constant factors
+    which vary with the extrinsic parameters. Only dominant-mode signals are supported,
+    as the plus and cross polarizations are assumed to be different only by
+    a phase. With this model all supported parameters may be marginalized over,
+    or any subset.
+
+    Supported Marginalizations: tc (time), distance, coa_phase (dominant mode), polarization, ra dec
+    +++
+    Earth Rotation:❌ LISA:❌ Higher Modes:❌
+    :ref:`Example `
+
+=================================================
+Composite Models
+=================================================
+
+.. card:: Hierarchical
+
+    :py:class:`pycbc.inference.models.hierarchical.HierarchicalModel`
+
+    The hierarchical model is a container for submodels. Each submodel
+    makes an independent contribution to an overall likelihood function.
+
+    See :ref:`the full page on the hierarchical model `.
+
+.. card:: Multiple Signal
+
+    ``'multi_signal'`` :py:class:`pycbc.inference.models.hierarchical.MultiSignalModel`
+
+    This is a container for submodels where each model shares use of the same
+    data and hence there may be cross terms between the several signals.
+ This model requires support from the submodel to handle cross-term + calculations. Some supported models include the `gaussian_noise`, `relative` + and `single_template`. From bb1ae76a6f2f5377104310783381450549117c1a Mon Sep 17 00:00:00 2001 From: Tito Dal Canton Date: Wed, 19 Apr 2023 17:10:55 +0200 Subject: [PATCH 27/32] Replace gw-openscience.org with gwosc.org (#4326) --- bin/pycbc_losc_segment_query | 4 ++-- docs/dataquality.rst | 4 ++-- docs/inference/examples/gw150914.rst | 6 +++--- docs/pycbc_condition_strain.rst | 2 +- examples/gw150914/audio.py | 2 +- examples/gw150914/gw150914_h1_snr.py | 2 +- examples/inference/data/o1.ini | 2 +- examples/inference/data/o2.ini | 2 +- examples/inference/margtime/get.sh | 3 ++- examples/multi_inspiral/run.sh | 4 ++-- pycbc/catalog/catalog.py | 2 +- pycbc/dq.py | 2 +- pycbc/frame/losc.py | 12 ++++++------ 13 files changed, 24 insertions(+), 23 deletions(-) diff --git a/bin/pycbc_losc_segment_query b/bin/pycbc_losc_segment_query index 9476349bc62..4403af29168 100644 --- a/bin/pycbc_losc_segment_query +++ b/bin/pycbc_losc_segment_query @@ -41,8 +41,8 @@ def query_gwosc(ifo, segment_name, gps_start_time, duration): """ response = urlopen( - 'https://www.gw-openscience.org/timeline/segments/json/O1/{}_{}/{}/{}/'.format( - ifo, segment_name, gps_start_time, duration)) + f'https://www.gwosc.org/timeline/segments/json/O1/{ifo}_{segment_name}/{gps_start_time}/{duration}/' + ) logging.info(response.info()) json_segment_data = json.loads(response.read()) diff --git a/docs/dataquality.rst b/docs/dataquality.rst index 2d7cdc64cf9..ac3aa14cef5 100644 --- a/docs/dataquality.rst +++ b/docs/dataquality.rst @@ -27,8 +27,8 @@ Finding times of hardware injections What flags can I query? ======================== -A list of many of the flags which can be quiered is `available here `_. Instead, just give the -raw name such as "DATA" instead of "H1_DATA". +A list of many of the flags which can be quiered is `available here `_. +Instead, just give the raw name such as "DATA" instead of "H1_DATA". There are two additional types of flags which can be queried. These are the negation of the flags like "NO_CBC_HW_INJ". The flag "CBC_HW_INJ" would diff --git a/docs/inference/examples/gw150914.rst b/docs/inference/examples/gw150914.rst index 36dfc5cd0c6..2ae360c57e7 100644 --- a/docs/inference/examples/gw150914.rst +++ b/docs/inference/examples/gw150914.rst @@ -9,12 +9,12 @@ as was :ref:`used for the simulated BBH example`. We only data configuration file, so that we will run on real gravitational-wave data. First, we need to download the data from the `Gravitational Wave Open Science -Center `_. Run: +Center `_. Run: .. code-block:: bash - wget https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW150914/v3/H-H1_GWOSC_16KHZ_R1-1126257415-4096.gwf - wget https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW150914/v3/L-L1_GWOSC_16KHZ_R1-1126257415-4096.gwf + wget https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW150914/v3/H-H1_GWOSC_16KHZ_R1-1126257415-4096.gwf + wget https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW150914/v3/L-L1_GWOSC_16KHZ_R1-1126257415-4096.gwf This will download the appropriate data ("frame") files to your current working directory. 
You can now use the following data configuration file: diff --git a/docs/pycbc_condition_strain.rst b/docs/pycbc_condition_strain.rst index c4c6b9ba68e..001fcd86b1a 100644 --- a/docs/pycbc_condition_strain.rst +++ b/docs/pycbc_condition_strain.rst @@ -115,4 +115,4 @@ might read more data than what specified by the ``--gps-start-time`` and value that causes ``pycbc_condition_strain`` to request data outside the range of availability. -.. _GWOSC: https://www.gw-openscience.org/about/ +.. _GWOSC: https://www.gwosc.org/about/ diff --git a/examples/gw150914/audio.py b/examples/gw150914/audio.py index 1b63af981ef..8b82a85cb5b 100644 --- a/examples/gw150914/audio.py +++ b/examples/gw150914/audio.py @@ -9,7 +9,7 @@ # Read data and remove low frequency content fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf' -url = "https://www.gw-openscience.org/GW150914data/" + fname +url = "https://www.gwosc.org/GW150914data/" + fname urlretrieve(url, filename=fname) h1 = highpass_fir(read_frame(fname, 'H1:LOSC-STRAIN'), 15.0, 8) diff --git a/examples/gw150914/gw150914_h1_snr.py b/examples/gw150914/gw150914_h1_snr.py index 9a37f85d25d..2360e702827 100644 --- a/examples/gw150914/gw150914_h1_snr.py +++ b/examples/gw150914/gw150914_h1_snr.py @@ -8,7 +8,7 @@ # Read data and remove low frequency content fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf' -url = "https://www.gw-openscience.org/GW150914data/" + fname +url = "https://www.gwosc.org/GW150914data/" + fname urlretrieve(url, filename=fname) h1 = read_frame('H-H1_LOSC_4_V2-1126259446-32.gwf', 'H1:LOSC-STRAIN') h1 = highpass_fir(h1, 15, 8) diff --git a/examples/inference/data/o1.ini b/examples/inference/data/o1.ini index cf0bd7ebf5a..64924323f48 100644 --- a/examples/inference/data/o1.ini +++ b/examples/inference/data/o1.ini @@ -23,7 +23,7 @@ psd-segment-stride = 4 ; with a ligo-data-server, you can use the frame-type argument to automatically ; locate the location of the frame files containing the data. If you are not ; running on one of those computers, download the necessary data from GWOSC -; (gw-openscience.org), remove the frame-type argument, and uncomment +; (gwosc.org), remove the frame-type argument, and uncomment ; frame-files, pointing the latter to the files you downloaded. ;frame-files = H1:/PATH/TO/DOWNLOADED/H1FRAME.gwf L1:/PATH/TO/DOWNLOADED/L1FRAME.gwf frame-type = H1:H1_LOSC_16_V1 L1:L1_LOSC_16_V1 diff --git a/examples/inference/data/o2.ini b/examples/inference/data/o2.ini index ebe71cfb8b2..873e5034149 100644 --- a/examples/inference/data/o2.ini +++ b/examples/inference/data/o2.ini @@ -24,7 +24,7 @@ psd-segment-stride = 4 ; with a ligo-data-server, you can use the frame-type argument to automatically ; locate the location of the frame files containing the data. If you are not ; running on one of those computers, download the necessary data from GWOSC -; (gw-openscience.org), remove the frame-type argument, and uncomment +; (gwosc.org), remove the frame-type argument, and uncomment ; frame-files, pointing the latter to the files you downloaded. 
 frame-type = H1:H1_GWOSC_O2_16KHZ_R1 L1:L1_GWOSC_O2_16KHZ_R1 V1:V1_GWOSC_O2_16KHZ_R1
 ;frame-files = H1:/PATH/TO/DOWNLOADED/H1FRAME.gwf L1:/PATH/TO/DOWNLOADED/L1FRAME.gwf V1:/PATH/TO/DOWNLOADED/V1FRAME.gwf
diff --git a/examples/inference/margtime/get.sh b/examples/inference/margtime/get.sh
index c8f9ece6359..54bc8f0abf1 100644
--- a/examples/inference/margtime/get.sh
+++ b/examples/inference/margtime/get.sh
@@ -4,5 +4,6 @@ for ifo in H-H1 L-L1
 do
     file=${ifo}_GWOSC_4KHZ_R1-1126257415-4096.gwf
     test -f ${file} && continue
-    curl -O --show-error --silent https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW150914/v3/${ifo}_GWOSC_4KHZ_R1-1126257415-4096.gwf
+    curl -O -L --show-error --silent \
+        https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW150914/v3/${ifo}_GWOSC_4KHZ_R1-1126257415-4096.gwf
 done
diff --git a/examples/multi_inspiral/run.sh b/examples/multi_inspiral/run.sh
index 30621131ddf..cbbc9bd131d 100755
--- a/examples/multi_inspiral/run.sh
+++ b/examples/multi_inspiral/run.sh
@@ -3,11 +3,11 @@
 CONFIG_URL=https://github.com/gwastro/pycbc-config/raw/master/test/multi_inspiral
 BANK_FILE=gw170817_single_template.hdf
 BANK_VETO_FILE=bank_veto_bank.xml
-H1_FRAME=https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW170817/v3/H-H1_GWOSC_4KHZ_R1-1187006835-4096.gwf
+H1_FRAME=https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW170817/v3/H-H1_GWOSC_4KHZ_R1-1187006835-4096.gwf
 H1_CHANNEL=GWOSC-4KHZ_R1_STRAIN
 L1_FRAME=https://dcc.ligo.org/public/0144/T1700406/003/L-L1_CLEANED_HOFT_C02_T1700406_v3-1187008667-4096.gwf
 L1_CHANNEL=DCH-CLEAN_STRAIN_C02_T1700406_v3
-V1_FRAME=https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW170817/v3/V-V1_GWOSC_4KHZ_R1-1187006835-4096.gwf
+V1_FRAME=https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW170817/v3/V-V1_GWOSC_4KHZ_R1-1187006835-4096.gwf
 V1_CHANNEL=GWOSC-4KHZ_R1_STRAIN

 echo -e "\\n\\n>> [`date`] Getting template bank"
diff --git a/pycbc/catalog/catalog.py b/pycbc/catalog/catalog.py
index 6f69c76f8ba..0f5ada0d92f 100644
--- a/pycbc/catalog/catalog.py
+++ b/pycbc/catalog/catalog.py
@@ -32,7 +32,7 @@
 # FIXME with posteriors when available and we can just post-process that

 # LVC catalogs
-base_lvc_url = "https://www.gw-openscience.org/eventapi/jsonfull/{}/"
+base_lvc_url = "https://www.gwosc.org/eventapi/jsonfull/{}/"
 _catalogs = {'GWTC-1-confident': 'LVC',
              'GWTC-1-marginal': 'LVC',
              'Initial_LIGO_Virgo': 'LVC',
diff --git a/pycbc/dq.py b/pycbc/dq.py
index 171f01441bd..3a48b3eee29 100644
--- a/pycbc/dq.py
+++ b/pycbc/dq.py
@@ -101,7 +101,7 @@ def parse_veto_definer(veto_def_filename, ifos):
     return data


-GWOSC_URL = 'https://www.gw-openscience.org/timeline/segments/json/{}/{}_{}/{}/{}/'
+GWOSC_URL = 'https://www.gwosc.org/timeline/segments/json/{}/{}_{}/{}/{}/'


 def query_dqsegdb2(detector, flag_name, start_time, end_time, server):
diff --git a/pycbc/frame/losc.py b/pycbc/frame/losc.py
index b2b5c229b96..f5aff99e211 100644
--- a/pycbc/frame/losc.py
+++ b/pycbc/frame/losc.py
@@ -14,11 +14,11 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" -This modules contains functions for getting data from the LOSC +This modules contains functions for getting data from the GWOSC """ from pycbc.io import get_file -_losc_url = "https://www.gw-openscience.org/archive/links/%s/%s/%s/%s/json/" +_gwosc_url = "https://www.gwosc.org/archive/links/%s/%s/%s/%s/json/" def get_run(time, ifo=None): """ Return the run name for a given time @@ -90,7 +90,7 @@ def losc_frame_json(ifo, start_time, end_time): 'You have requested data that uses ' 'both %s and %s' % (run, run2)) - url = _losc_url % (run, ifo, int(start_time), int(end_time)) + url = _gwosc_url % (run, ifo, int(start_time), int(end_time)) try: return json.loads(urlopen(url).read().decode()) @@ -100,7 +100,7 @@ def losc_frame_json(ifo, start_time, end_time): 'ifo=%s, run=%s, between %s-%s' % (ifo, run, start_time, end_time)) def losc_frame_urls(ifo, start_time, end_time): - """ Get a list of urls to losc frame files + """ Get a list of urls to GWOSC frame files Parameters ---------- @@ -121,7 +121,7 @@ def losc_frame_urls(ifo, start_time, end_time): return [d['url'] for d in data if d['format'] == 'gwf'] def read_frame_losc(channels, start_time, end_time): - """ Read channels from losc data + """ Read channels from GWOSC data Parameters ---------- @@ -162,7 +162,7 @@ def read_frame_losc(channels, start_time, end_time): return ts def read_strain_losc(ifo, start_time, end_time): - """ Get the strain data from the LOSC data + """ Get the strain data from the GWOSC data Parameters ---------- From 90a940b0aeeee612b8369e0999209b841204c7cc Mon Sep 17 00:00:00 2001 From: Ian Harry Date: Mon, 10 Jul 2023 12:10:38 +0100 Subject: [PATCH 28/32] Avoid skymap v1.1.0 (#4429) --- companion.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/companion.txt b/companion.txt index 770ed63a7ca..5eb0fa72771 100644 --- a/companion.txt +++ b/companion.txt @@ -6,7 +6,7 @@ healpy # Needed for GraceDB uploads and skymap generation ligo-gracedb>=2.10.0 -ligo.skymap +ligo.skymap!=1.1.0 # Needed for ringdown pykerr From 0c7cfb452002ea4e761dda79e857c432477b215c Mon Sep 17 00:00:00 2001 From: Tito Dal Canton Date: Thu, 18 Jan 2024 15:30:22 +0100 Subject: [PATCH 29/32] Try to remove the LISA inference example --- docs/_include/inference_example_lisa_smbhb.sh | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 docs/_include/inference_example_lisa_smbhb.sh diff --git a/docs/_include/inference_example_lisa_smbhb.sh b/docs/_include/inference_example_lisa_smbhb.sh deleted file mode 100644 index 8376b58200b..00000000000 --- a/docs/_include/inference_example_lisa_smbhb.sh +++ /dev/null @@ -1,3 +0,0 @@ -sh ../../examples/inference/lisa_smbhb/get.sh -sh ../../examples/inference/lisa_smbhb/run.sh -python ../../examples/inference/lisa_smbhb/advanced_plot.py From 9d86314559de76a69cad7c9375d921d5aee5f4d5 Mon Sep 17 00:00:00 2001 From: Tito Dal Canton Date: Thu, 18 Jan 2024 15:38:10 +0100 Subject: [PATCH 30/32] Force pegasus 5.0.3 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7940c83cff4..91598875809 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,7 @@ tqdm gwdatafind # Requirements for full pegasus env -pegasus-wms.api >= 5.0.3 +pegasus-wms.api == 5.0.3 # need to pin until pegasus for further upstream # addresses incompatibility between old flask/jinja2 and latest markupsafe markupsafe <= 2.0.1 From a531b313a092e4c29b1687e24c1b41f12bde7aa9 Mon Sep 17 00:00:00 2001 From: Tito Dal Canton Date: Mon, 24 Apr 2023 
Subject: [PATCH 31/32] Fix Sphinx deprecation (#4331)

---
 docs/conf.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index 3a5b989256c..bfb74726f45 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -11,7 +11,7 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.

-import sys, os
+import os
 import pycbc.version
 import subprocess
 import glob
@@ -263,9 +263,10 @@

 # Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None,
-                       'h5py': ('http://docs.h5py.org/en/stable/', None),
-                      }
+intersphinx_mapping = {
+    'python': ('http://docs.python.org/', None),
+    'h5py': ('http://docs.h5py.org/en/stable/', None),
+}

 napoleon_use_ivar = False

From 89f4f1e12894f8cef2c60e5c3654769e35aa5be1 Mon Sep 17 00:00:00 2001
From: Tito Dal Canton
Date: Fri, 19 Jan 2024 11:24:46 +0100
Subject: [PATCH 32/32] Try to fix a warning treated as error (duplicated
 entry in toctree)

---
 docs/inference.rst | 1 -
 1 file changed, 1 deletion(-)

diff --git a/docs/inference.rst b/docs/inference.rst
index 980da5f079c..153e53ed124 100644
--- a/docs/inference.rst
+++ b/docs/inference.rst
@@ -513,7 +513,6 @@ Examples
     inference/examples/single.rst
     inference/examples/relative.rst
     inference/examples/hierarchical.rst
-    inference/examples/sampler_platter.rst
     inference/models.rst
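 The GWOSC helpers whose URLs were updated in patch 27 can be exercised
 end-to-end once the series is applied. Below is a minimal sketch, not part of
 any patch above; it calls ``read_strain_losc`` with the signature shown in the
 ``pycbc/frame/losc.py`` diff, requires network access, and uses a GPS interval
 around GW150914 purely for illustration:

 .. code-block:: python

     from pycbc.frame.losc import read_strain_losc

     # Fetch and read about 32 s of Hanford strain around GW150914 through
     # the gwosc.org archive endpoints updated in patch 27.
     strain = read_strain_losc('H1', 1126259446, 1126259478)
     print(len(strain), strain.sample_rate)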