diff --git a/Dockerfile b/Dockerfile index 7002c97072a..97997bc5ab9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,7 +15,7 @@ RUN dnf -y install https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/cvmfs-release- # set up environment RUN cd / && \ - mkdir -p /cvmfs/config-osg.opensciencegrid.org /cvmfs/oasis.opensciencegrid.org /cvmfs/gwosc.osgstorage.org && echo "config-osg.opensciencegrid.org /cvmfs/config-osg.opensciencegrid.org cvmfs ro,noauto 0 0" >> /etc/fstab && echo "oasis.opensciencegrid.org /cvmfs/oasis.opensciencegrid.org cvmfs ro,noauto 0 0" >> /etc/fstab && echo "gwosc.osgstorage.org /cvmfs/gwosc.osgstorage.org cvmfs ro,noauto 0 0" >> /etc/fstab && mkdir -p /oasis /scratch /projects /usr/lib64/slurm /var/run/munge && \ + mkdir -p /cvmfs/config-osg.opensciencegrid.org /cvmfs/software.igwn.org /cvmfs/gwosc.osgstorage.org && echo "config-osg.opensciencegrid.org /cvmfs/config-osg.opensciencegrid.org cvmfs ro,noauto 0 0" >> /etc/fstab && echo "software.igwn.org /cvmfs/software.igwn.org cvmfs ro,noauto 0 0" >> /etc/fstab && echo "gwosc.osgstorage.org /cvmfs/gwosc.osgstorage.org cvmfs ro,noauto 0 0" >> /etc/fstab && mkdir -p /oasis /scratch /projects /usr/lib64/slurm /var/run/munge && \ groupadd -g 1000 pycbc && useradd -u 1000 -g 1000 -d /opt/pycbc -k /etc/skel -m -s /bin/bash pycbc # Install MPI software needed for pycbc_inference @@ -37,7 +37,7 @@ ENV PATH "/usr/local/bin:/usr/bin:/bin:/lib64/openmpi/bin/bin" # Set the default LAL_DATA_PATH to point at CVMFS first, then the container. # Users wanting it to point elsewhere should start docker using: # docker -e LAL_DATA_PATH="/my/new/path" -ENV LAL_DATA_PATH "/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation:/opt/pycbc/pycbc-software/share/lal-data" +ENV LAL_DATA_PATH "/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation:/opt/pycbc/pycbc-software/share/lal-data" # When the container is started with # docker run -it pycbc/pycbc-el8:latest diff --git a/bin/live/pycbc_live_combine_single_fits b/bin/live/pycbc_live_combine_single_fits index d65cfc9b963..924d6fb2823 100644 --- a/bin/live/pycbc_live_combine_single_fits +++ b/bin/live/pycbc_live_combine_single_fits @@ -12,12 +12,15 @@ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. 
+"""Combine PyCBC Live single-detector trigger fitting parameters from several +different files.""" + import h5py, numpy as np, argparse import logging import pycbc -parser = argparse.ArgumentParser(usage="", - description="Combine fitting parameters from several different files") + +parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--verbose", action="store_true", help="Print extra debugging information", default=False) parser.add_argument("--trfits-files", nargs="+", required=True, @@ -30,7 +33,7 @@ parser.add_argument("--output", required=True, parser.add_argument("--ifos", required=True, nargs="+", help="list of ifos fo collect info for") -args=parser.parse_args() +args = parser.parse_args() pycbc.init_logging(args.verbose) @@ -42,8 +45,8 @@ if args.conservative_percentile < 50 or \ "otherwise it is either not a percentile, or not " "conservative.") -counts_all = {ifo:[] for ifo in args.ifos} -alphas_all = {ifo:[] for ifo in args.ifos} +counts_all = {ifo: [] for ifo in args.ifos} +alphas_all = {ifo: [] for ifo in args.ifos} analysis_dates = [] with h5py.File(args.trfits_files[0], 'r') as fit_f0: @@ -58,15 +61,15 @@ with h5py.File(args.trfits_files[0], 'r') as fit_f0: fit_thresh = fit_f0.attrs['fit_threshold'] fit_func = fit_f0.attrs['fit_function'] -live_times = {ifo : [] for ifo in args.ifos} +live_times = {ifo: [] for ifo in args.ifos} -trigger_file_times = [] +trigger_file_starts = [] +trigger_file_ends = [] n_files = len(args.trfits_files) logging.info("Checking through %d files", n_files) for f in args.trfits_files: - fits_f = h5py.File(f, 'r') # Check that the file uses the same setup as file 0, to make sure # all coefficients are comparable @@ -77,16 +80,17 @@ for f in args.trfits_files: assert all(fits_f['bins_lower'][:] == bl) assert all(fits_f['bins_upper'][:] == bu) - # Get the time of the last trigger in the trigger_fits file + # Get the time of the first/last triggers in the trigger_fits file gps_last = 0 + gps_first = np.inf for ifo in args.ifos: if ifo not in fits_f: continue - else: - trig_times = fits_f[ifo]['triggers']['end_time'][:] - gps_last = max(gps_last, - trig_times.max()) - trigger_file_times.append(gps_last) + trig_times = fits_f[ifo]['triggers']['end_time'][:] + gps_last = max(gps_last, trig_times.max()) + gps_first = min(gps_first, trig_times.min()) + trigger_file_starts.append(gps_first) + trigger_file_ends.append(gps_last) for ifo in args.ifos: if ifo not in fits_f: @@ -98,27 +102,27 @@ for f in args.trfits_files: counts_all[ifo].append(fits_f[ifo + '/counts'][:]) alphas_all[ifo].append(fits_f[ifo + '/fit_coeff'][:]) if any(np.isnan(fits_f[ifo + '/fit_coeff'][:])): - logging.info("nan in " + f + ", " + ifo) + logging.info("nan in %s, %s", f, ifo) logging.info(fits_f[ifo + '/fit_coeff'][:]) fits_f.close() -# Set up the date array, this is stored as an offset from the start date of -# the combination to the end of the trigger_fits file. 
-# This allows for missing or empty days +# Set up the date array, this is stored as an offset from the first trigger time of +# the first file to the last trigger of the file -trigger_file_times = np.array(trigger_file_times) -ad_order = np.argsort(np.array(trigger_file_times)) -start_date_n = trigger_file_times[ad_order[0]] -ad = trigger_file_times[ad_order] - start_date_n +trigger_file_starts = np.array(trigger_file_starts) +trigger_file_ends = np.array(trigger_file_ends) +ad_order = np.argsort(trigger_file_starts) +start_time_n = trigger_file_starts[ad_order[0]] +ad = trigger_file_ends[ad_order] - start_time_n -# Get the counts and alphas sorted by bin rather than by date +# Get the counts and alphas counts_bin = {ifo: [c for c in zip(*counts_all[ifo])] for ifo in args.ifos} alphas_bin = {ifo: [a for a in zip(*alphas_all[ifo])] for ifo in args.ifos} -alphas_out = {ifo : np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos} -counts_out = {ifo : np.inf * np.ones(len(counts_bin[ifo])) for ifo in args.ifos} -cons_alphas_out = {ifo : np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos} -cons_counts_out = {ifo : np.inf * np.ones(len(alphas_bin[ifo])) for ifo in args.ifos} +alphas_out = {ifo: np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos} +counts_out = {ifo: np.inf * np.ones(len(counts_bin[ifo])) for ifo in args.ifos} +cons_alphas_out = {ifo: np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos} +cons_counts_out = {ifo: np.inf * np.ones(len(alphas_bin[ifo])) for ifo in args.ifos} logging.info("Writing results") fout = h5py.File(args.output, 'w') @@ -126,38 +130,34 @@ fout.attrs['fit_threshold'] = fit_thresh fout.attrs['conservative_percentile'] = args.conservative_percentile fout.attrs['ifos'] = args.ifos fout['bins_edges'] = list(bl) + [bu[-1]] -fout['fits_dates'] = ad + start_date_n +fout['fits_dates'] = ad + start_time_n for ifo in args.ifos: - fout.create_group(ifo) - fout[ifo].attrs['live_time'] = sum(live_times[ifo]) - -save_allmeanalpha = {} -for ifo in args.ifos: - fout_ifo = fout[ifo] logging.info(ifo) + fout_ifo = fout.create_group(ifo) l_times = np.array(live_times[ifo]) - count_all = np.sum(counts_bin[ifo], axis=0) / l_times - invalphan = np.array(counts_bin[ifo]) / np.array(alphas_bin[ifo]) - invalphan_all = np.mean(invalphan, axis=0) - alpha_all = np.mean(counts_bin[ifo], axis=0) / invalphan_all - meant = l_times.mean() - - fout_ifo[f'separate_fits/live_times'] = l_times - fout_ifo[f'separate_fits/date'] = ad + start_date_n + fout_ifo.attrs['live_time'] = l_times.sum() + + fout_ifo['separate_fits/live_times'] = l_times[ad_order] + fout_ifo['separate_fits/start_time'] = trigger_file_starts[ad_order] + fout_ifo['separate_fits/end_time'] = trigger_file_ends[ad_order] + for counter, a_c_u_l in enumerate(zip(alphas_bin[ifo], counts_bin[ifo], bu, bl)): a, c, u, l = a_c_u_l - a = np.array(a) - c = np.array(c) + # Sort alpha and counts by date + a = np.array(a)[ad_order] + c = np.array(c)[ad_order] invalphan = c / a mean_alpha = c.mean() / invalphan.mean() cons_alpha = np.percentile(a, 100 - args.conservative_percentile) cons_alphas_out[ifo][counter] = cons_alpha alphas_out[ifo][counter] = mean_alpha - cons_count = np.percentile(c, args.conservative_percentile) - cons_counts_out[ifo][counter] = cons_count * len(c) - counts_out[ifo][counter] = c.sum() + # To get the count values, we need to convert to rates and back again + r = c / l_times[ad_order] + cons_rate = np.percentile(r, args.conservative_percentile) + cons_counts_out[ifo][counter] = cons_rate * l_times[ad_order].sum() + 
counts_out[ifo][counter] = np.mean(r) * l_times[ad_order].sum() fout_ifo[f'separate_fits/bin_{counter:d}/fit_coeff'] = a fout_ifo[f'separate_fits/bin_{counter:d}/counts'] = c @@ -173,15 +173,15 @@ for ifo in args.ifos: # Take some averages for plotting and summary values overall_invalphan = counts_out[ifo] / alphas_out[ifo] overall_meanalpha = counts_out[ifo].mean() / overall_invalphan.mean() - sum_counts_out = counts_out[ifo].sum() / sum(live_times[ifo]) - save_allmeanalpha[ifo] = overall_meanalpha # For the fixed version, we just set this to 1 - fout_ifo['fixed/counts'] = [1 for c in counts_out[ifo]] - fout_ifo['fixed/fit_coeff'] = [0 for a in alphas_out[ifo]] + fout_ifo['fixed/counts'] = [1] * len(counts_out[ifo]) + fout_ifo['fixed/fit_coeff'] = [0] * len(alphas_out[ifo]) # Add some useful info to the output file - fout_ifo.attrs['mean_alpha'] = save_allmeanalpha[ifo] + fout_ifo.attrs['mean_alpha'] = overall_meanalpha fout_ifo.attrs['total_counts'] = counts_out[ifo].sum() fout.close() + +logging.info('Done') diff --git a/bin/live/pycbc_live_plot_combined_single_fits b/bin/live/pycbc_live_plot_combined_single_fits index 3b16be3eac2..7c88c6bd490 100644 --- a/bin/live/pycbc_live_plot_combined_single_fits +++ b/bin/live/pycbc_live_plot_combined_single_fits @@ -63,7 +63,8 @@ live_total = {} separate_alphas = {} separate_counts = {} -separate_dates = {} +separate_starts = {} +separate_ends = {} separate_times = {} logging.info("Loading Data") @@ -72,7 +73,6 @@ with h5py.File(args.combined_fits_file, 'r') as cff: bins_edges = cff['bins_edges'][:] conservative_percentile = cff.attrs['conservative_percentile'] n_bins = len(bins_edges) - 1 - fits_dates = cff['fits_dates'][:] for ifo in ifos: logging.info(ifo) live_total[ifo] = cff[ifo].attrs['live_time'] @@ -81,7 +81,8 @@ with h5py.File(args.combined_fits_file, 'r') as cff: cons_count[ifo] = cff[ifo]['conservative']['counts'][:] cons_alpha[ifo] = cff[ifo]['conservative']['fit_coeff'][:] - separate_dates[ifo] = cff[ifo]['separate_fits']['date'][:] + separate_starts[ifo] = cff[ifo]['separate_fits']['start_time'][:] + separate_ends[ifo] = cff[ifo]['separate_fits']['end_time'][:] separate_times[ifo] = cff[ifo]['separate_fits']['live_times'][:] separate_data = cff[ifo]['separate_fits'] @@ -108,6 +109,20 @@ def bin_proportion(upper, lower, log_spacing=False): else: return ((lower + upper) / 2. 
- bin_min) / (bin_max - bin_min) +# Set up the x ticks - note that these are rounded to the nearest +# midnight, so may not line up exactly with the data +min_start = min([separate_starts[ifo].min() for ifo in ifos]) +max_end = max([separate_ends[ifo].max() for ifo in ifos]) + +xtix = [] +xtix_labels = [] +t = min_start +while t < max_end: + # Strip off the time information, ticks are at midnight + time_dt = gpstime.gps_to_utc(t).date() + xtix_labels.append(time_dt.strftime("%Y-%m-%d")) + xtix.append(gpstime.utc_to_gps(time_dt).gpsSeconds) + t += 86400 logging.info("Plotting fits information") for ifo in ifos: @@ -119,24 +134,23 @@ for ifo in ifos: alpha_lines = [] count_lines = [] - start_date_dt = gpstime.gps_to_utc(separate_dates[ifo][0]) - start_date = start_date_dt.strftime("%Y-%m-%d %H:%M:%S") - for i, bl_bu in enumerate(zip(bin_starts, bin_ends)): bl, bu = bl_bu alphas = separate_alphas[ifo][i] counts = separate_counts[ifo][i] + # replace zeros with infinity, so that it is + # not plotted rather than plotted as zero valid = np.logical_and(alphas > 0, np.isfinite(alphas)) - + alphas[np.logical_not(valid)] = np.inf if not any(valid): + logging.warning("No valid fit coefficients for %s", ifo) continue - a = alphas[valid] - l_times = separate_times[ifo][valid] - rate = counts[valid] / l_times - ad = (separate_dates[ifo][valid] - separate_dates[ifo][0]) / 86400. + + l_times = separate_times[ifo] + rate = counts / l_times ma = mean_alpha[ifo][i] ca = cons_alpha[ifo][i] @@ -146,20 +160,19 @@ for ifo in ifos: bin_prop = bin_proportion(bu, bl, log_spacing=args.log_colormap) bin_colour = plt.get_cmap(args.colormap)(bin_prop) - - alpha_lines += ax_alpha.plot(ad, a, c=bin_colour, - label="duration %.2f-%.2f" % (bl, bu)) + bin_label = f"duration {bl:.2f}-{bu:.2f}" + alpha_lines += ax_alpha.plot(separate_starts[ifo], alphas, c=bin_colour, + label=bin_label) alpha_lines.append(ax_alpha.axhline(ma, label="total fit = %.2f" % ma, - c=bin_colour, linestyle='--',)) + c=bin_colour, linestyle='--',)) alpha_lab = f"{conservative_percentile:d}th %ile = {ca:.2f}" alpha_lines.append(ax_alpha.axhline(ca, c=bin_colour, linestyle=':', label=alpha_lab)) - count_lines += ax_count.plot(ad, rate, - c=bin_colour, - label="duration %.2f-%.2f" % (bl, bu)) + count_lines += ax_count.plot(separate_starts[ifo], rate, c=bin_colour, + label=bin_label) count_lines.append(ax_count.axhline(mr, c=bin_colour, linestyle='--', label=f"mean = {mr:.3f}")) @@ -168,21 +181,24 @@ for ifo in ifos: c=bin_colour, linestyle=':', label=count_lab)) - ax_alpha.set_xlabel('Days since ' + start_date) - ax_alpha.set_ylabel('Fit coefficient') - ax_alpha.set_xlim([ad[0], ad[-1]]) alpha_labels = [l.get_label() for l in alpha_lines] - ax_alpha.grid(zorder=-30) ax_alpha.legend(alpha_lines, alpha_labels, loc='lower center', - ncol=3, bbox_to_anchor=(0.5, 1.01)) - fig_alpha.tight_layout() - fig_alpha.savefig(args.output_plot_name_format.format(ifo=ifo, type='fit_coeffs')) - ax_count.set_xlabel('Days since ' + start_date) - ax_count.set_ylabel('Counts per live time') - ax_count.set_xlim([ad[0], ad[-1]]) + ncol=5, bbox_to_anchor=(0.5, 1.01)) + ax_alpha.set_ylabel('Fit coefficient') + count_labels = [l.get_label() for l in count_lines] ax_count.legend(count_lines, count_labels, loc='lower center', - ncol=3, bbox_to_anchor=(0.5, 1.01)) - ax_count.grid(zorder=-30) + ncol=5, bbox_to_anchor=(0.5, 1.01)) + ax_count.set_ylabel('Rate of triggers above fit threshold [s$^{-1}$]') + + for ax in [ax_count, ax_alpha]: + ax.set_xticks(xtix) + 
ax.set_xticklabels(xtix_labels, rotation=90) + # Add 1/4 day padding either side + ax.set_xlim(xtix[0] - 21600, xtix[-1] + 21600) + ax.grid(zorder=-30) + fig_count.tight_layout() fig_count.savefig(args.output_plot_name_format.format(ifo=ifo, type='counts')) + fig_alpha.tight_layout() + fig_alpha.savefig(args.output_plot_name_format.format(ifo=ifo, type='fit_coeffs')) diff --git a/bin/pycbc_live b/bin/pycbc_live index 78fceaa6306..763d4ef0f09 100755 --- a/bin/pycbc_live +++ b/bin/pycbc_live @@ -15,6 +15,7 @@ import os.path import itertools import platform import subprocess +import multiprocessing from multiprocessing.dummy import threading from matplotlib import use use('agg') @@ -79,6 +80,7 @@ class LiveEventManager(object): def __init__(self, args, bank): self.low_frequency_cutoff = args.low_frequency_cutoff self.bank = bank + self.skymap_only_ifos = [] if args.skymap_only_ifos is None else list(set(args.skymap_only_ifos)) # Figure out what we are supposed to process within the pool of MPI processes self.comm = mpi.COMM_WORLD @@ -107,18 +109,26 @@ class LiveEventManager(object): # Keep track of which events have been uploaded self.last_few_coincs_uploaded = [] + # Give this a nominal value, it is required but not used + self.fu_cores = None if self.run_snr_optimization: # preestimate the number of CPU cores that we can afford giving # to followup processes without slowing down the main search - available_cores = len(os.sched_getaffinity(0)) bg_cores = len(tuple(itertools.combinations(ifos, 2))) analysis_cores = 1 + bg_cores - self.fu_cores = available_cores - analysis_cores - if self.fu_cores <= 0: - logging.warning('Insufficient number of CPU cores (%d) to ' - 'run search and trigger followups. Uploaded ' - 'triggers will momentarily increase the lag', - available_cores) + if platform.system() != 'Darwin': + available_cores = len(os.sched_getaffinity(0)) + self.fu_cores = available_cores - analysis_cores + if self.fu_cores <= 0: + logging.warning( + 'Insufficient number of CPU cores (%d) to ' + 'run search and trigger followups. Uploaded ' + 'triggers will momentarily increase the lag', + available_cores + ) + self.fu_cores = 1 + else: + # To enable mac testing, this is just set to 1 self.fu_cores = 1 if args.enable_embright_has_massgap: @@ -212,7 +222,7 @@ class LiveEventManager(object): ifo, triggers, pvalue_info, - recalculate_ifar=recalculate_ifar + recalculate_ifar=recalculate_ifar and ifo not in self.skymap_only_ifos ) # the SNR time series sample rate can vary slightly due to @@ -276,9 +286,9 @@ class LiveEventManager(object): triggers[f'foreground/pvalue_{ifo}_saturated'] = pv_sat def setup_optimize_snr( - self, results, live_ifos, triggering_ifos, fname, gid + self, results, live_ifos, triggering_ifos, fname ): - """Setup and start the network SNR optimization process for a + """Setup the network SNR optimization process for a candidate event. See arXiv:2008.07494 for details. Parameters @@ -295,8 +305,6 @@ class LiveEventManager(object): File name where the candidate information has been saved. Used to name other files produced as part of setting up the SNR optimization. - gid : str - GraceDB ID of the candidate. 
""" template_id = results[f'foreground/{live_ifos[0]}/template_id'] p = props(self.bank.table[template_id]) @@ -352,8 +360,6 @@ class LiveEventManager(object): hdfp['ifar'] = results['foreground/ifar'] if 'p_terr' in results: hdfp['p_terr'] = results['p_terr'] - if gid is not None: - hdfp['gid'] = gid for ifo in args.channel_name: hdfp[f'channel_names/{ifo}'] = args.channel_name[ifo] @@ -383,7 +389,9 @@ class LiveEventManager(object): if self.enable_gracedb_upload: cmd += '--enable-gracedb-upload ' - cmd += '--cores {} '.format(self.fu_cores) + if self.fu_cores: + cmd += '--cores {} '.format(self.fu_cores) + if args.processing_scheme: # we will use the cores for multiple workers of the # optimization routine, so we force the processing scheme @@ -396,6 +404,32 @@ class LiveEventManager(object): opt_scheme = args.processing_scheme.split(':')[0] cmd += '--processing-scheme {}:1 '.format(opt_scheme) + # Save the command which would be used: + snroc_fname = os.path.join(out_dir_path, 'snr_optimize_command.txt') + with open(snroc_fname,'w') as snroc_file: + snroc_file.write(cmd) + + return cmd, out_dir_path + + def run_optimize_snr(self, cmd, out_dir_path, attribute_fname, gid): + """ + Run the optimize_snr instance setup up by setup_optimize_snr + + Parameters + ---------- + cmd: str + Command to tell subprocess what to run for the + optimize_snr process + out_dir_path: os.path or str + Place for storing the SNR optimization results and log + attribute_fname: str + The filename of the attributes of the candidate + gid: str + GraceDB ID of the candidate + """ + if gid is not None: + with h5py.File(attribute_fname, 'a') as hdfp: + hdfp['gid'] = gid log_fname = os.path.join(out_dir_path, 'optimize_snr.log') logging.info('running %s with log to %s', cmd, log_fname) @@ -416,13 +450,13 @@ class LiveEventManager(object): # reload_buffer time. These args are documented here: # https://ligo-gracedb.readthedocs.io/en/latest/api.html#ligo.gracedb.rest.GraceDb # Because we do not change any of the request session values when running the - # code, it should remain thread safe. + # code, it should remain thread safe. 
gdbargs = {'reload_certificate': True, 'reload_buffer': 300} if self.gracedb_server: gdbargs['service_url'] = self.gracedb_server self.gracedb = GraceDb(**gdbargs) - def upload_in_thread(self, event, fname, comment, results, live_ifos, ifos, + def upload_in_thread(self, event, fname, comment, cmd, out_dir_path, upload_checks, optimize_snr_checks): gid = None if upload_checks: @@ -434,13 +468,14 @@ class LiveEventManager(object): search=self.gracedb_search, labels=self.gracedb_labels ) + if optimize_snr_checks: - self.setup_optimize_snr( - results, - live_ifos, - ifos, - fname, - gid + logging.info('Optimizing SNR for event above threshold ..') + self.run_optimize_snr( + cmd, + out_dir_path, + fname.replace('.xml.gz', '_attributes.hdf'), + gid ) def check_coincs(self, ifos, coinc_results, psds): @@ -457,7 +492,8 @@ class LiveEventManager(object): logging.info('computing followup data for coinc') coinc_ifos = coinc_results['foreground/type'].split('-') - followup_ifos = list(set(ifos) - set(coinc_ifos)) + followup_ifos = set(ifos) - set(coinc_ifos) + followup_ifos = list(followup_ifos | set(self.skymap_only_ifos)) double_ifar = coinc_results['foreground/ifar'] if double_ifar < args.ifar_double_followup_threshold: @@ -503,7 +539,7 @@ class LiveEventManager(object): ifar = coinc_results['foreground/ifar'] upload_checks = self.enable_gracedb_upload and self.ifar_upload_threshold < ifar optimize_snr_checks = self.run_snr_optimization and self.ifar_upload_threshold < ifar - + # Keep track of the last few coincs uploaded in order to # prevent singles being uploaded as well for coinc events self.last_few_coincs_uploaded.append(event.merger_time) @@ -511,17 +547,33 @@ class LiveEventManager(object): self.last_few_coincs_uploaded = \ self.last_few_coincs_uploaded[-10:] + # Save the event if not upload_checks: event.save(fname) + + # Save the event info/data and what _would_ be run for SNR optimizer, + # even if not running it - do this before the thread so no + # data buffers move on in a possible interim period + + # Which IFOs were active? + live_ifos = [ifo for ifo in sld if 'snr_series' in sld[ifo]] + + # Tell SNR optimized event about p_terr + if hasattr(event, 'p_terr') and event.p_terr is not None: + coinc_results['p_terr'] = event.p_terr + + # Do the optimizer setup + cmd, out_dir_path = self.setup_optimize_snr( + coinc_results, + live_ifos, + coinc_ifos, + fname, + ) + + # Now start the thread to run the SNR optimizer if upload_checks or optimize_snr_checks: - if optimize_snr_checks: - logging.info('Optimizing SNR for coinc above threshold ..') - # Tell snr optimized event about p_terr - if hasattr(event, 'p_terr') and event.p_terr is not None: - coinc_results['p_terr'] = event.p_terr - live_ifos = [ifo for ifo in sld if 'snr_series' in sld[ifo]] thread_args = ( - event, fname, comment, coinc_results, live_ifos, coinc_ifos, + event, fname, comment, cmd, out_dir_path, upload_checks, optimize_snr_checks ) gdb_upload_thread = threading.Thread(target=self.upload_in_thread, @@ -543,6 +595,7 @@ class LiveEventManager(object): logging.info(f'Found {ifo} single with ifar {sifar}') followup_ifos = [i for i in active if i is not ifo] + followup_ifos = list(set(followup_ifos) | set(self.skymap_only_ifos)) # Don't recompute ifar considering other ifos sld = self.compute_followup_data( [ifo], @@ -580,10 +633,12 @@ class LiveEventManager(object): comment = comment.format(ifo, ppdets(followup_ifos)) # Has a coinc event at this time been uploaded recently? 
- # If so, skip upload + # If so, skip upload - Note that this means that we _always_ + # prefer an uploaded coincident event over a single coinc_tdiffs = abs(event.merger_time - numpy.array(self.last_few_coincs_uploaded)) nearby_coincs = coinc_tdiffs < 0.1 + if any(nearby_coincs): logging.info( "Single-detector candidate at time %.3f was already " @@ -598,17 +653,38 @@ class LiveEventManager(object): self.ifar_upload_threshold < single['foreground/ifar'] and \ not any(nearby_coincs) and \ self.run_snr_optimization + + # Save the event if not upload_checks: event.save(fname) + + # Save the event info/data and what _would_ be run for SNR optimizer, + # even if not running it - do this before the thread so no + # data buffers move on in a possible interim period + + if any(nearby_coincs): + # Don't even save the snr optimization stuff for singles + # where there is already a coinc + continue + + # Which IFOs were active? + live_ifos = [ifo for ifo in sld if 'snr_series' in sld[ifo]] + + # Tell SNR optimized event about p_terr + if hasattr(event, 'p_terr') and event.p_terr is not None: + single['p_terr'] = event.p_terr + + # Do the optimizer setup + cmd, out_dir_path = self.setup_optimize_snr( + single, + live_ifos, + [ifo], + fname, + ) + if upload_checks or optimize_snr_checks: - if optimize_snr_checks: - logging.info('Optimizing SNR for single above threshold ..') - # Tell snr optimized event about p_terr - if hasattr(event, 'p_terr') and event.p_terr is not None: - single['p_terr'] = event.p_terr - live_ifos = [ifo for ifo in sld if 'snr_series' in sld[ifo]] thread_args = ( - event, fname, comment, single, live_ifos, [ifo], + event, fname, comment, cmd, out_dir_path, upload_checks, optimize_snr_checks ) gdb_upload_thread = threading.Thread(target=self.upload_in_thread, @@ -790,6 +866,17 @@ parser.add_argument('--autogating-taper', type=float, metavar='SECONDS', help='Taper the strain before and after ' 'each gating window over a duration ' 'of SECONDS.') +parser.add_argument('--autogating-duration', type=float, default=16, + metavar='SECONDS', + help='Amount of data in seconds to apply autogating on.') +parser.add_argument('--autogating-psd-segment-length', type=float, default=2, + metavar='SECONDS', + help='Length in seconds of each segment used to estimate the PSD ' + 'with Welchs method and median averaging.') +parser.add_argument('--autogating-psd-stride', type=float, default=1, + metavar='SECONDS', + help='Length in seconds of the overlap between each segment used ' + 'to estimate the PSD with Welchs method and median averaging.') parser.add_argument('--sync', action='store_true', help="Imposes an MPI synchronization at each transfer of" @@ -849,8 +936,6 @@ parser.add_argument('--ifar-double-followup-threshold', type=float, required=Tru parser.add_argument('--pvalue-combination-livetime', type=float, required=True, help="Livetime used for p-value combination with followup " "detectors, in years") -parser.add_argument('--enable-single-detector-background', action='store_true', default=False) - parser.add_argument('--enable-gracedb-upload', action='store_true', default=False, help='Upload triggers to GraceDB') parser.add_argument('--enable-production-gracedb-upload', action='store_true', default=False, @@ -899,6 +984,8 @@ parser.add_argument('--enable-embright-has-massgap', action='store_true', defaul parser.add_argument('--embright-massgap-max', type=float, default=5.0, metavar='SOLAR MASSES', help='Upper limit of the mass gap, used for estimating ' 'HasMassGap probability.') 
+parser.add_argument('--skymap-only-ifos', nargs='+', + help="Detectors that only contribute in sky localization") scheme.insert_processing_option_group(parser) LiveSingle.insert_args(parser) @@ -909,8 +996,11 @@ mchirp_area.insert_args(parser) livepau.insert_live_pastro_option_group(parser) args = parser.parse_args() + scheme.verify_processing_options(args, parser) fft.verify_fft_options(args, parser) +ifos = set(args.channel_name.keys()) +analyze_singles = LiveSingle.verify_args(args, parser, ifos) if args.output_background is not None and len(args.output_background) != 2: parser.error('--output-background takes two parameters: period and path') @@ -937,10 +1027,10 @@ if bank.min_f_lower < args.low_frequency_cutoff: 'minimum f_lower across all templates ' '({} Hz)'.format(args.low_frequency_cutoff, bank.min_f_lower)) -ifos = set(args.channel_name.keys()) logging.info('Analyzing data from detectors %s', ppdets(ifos)) evnt = LiveEventManager(args, bank) +logging.info('Detectors that only aid in the sky localization %s', ppdets(evnt.skymap_only_ifos)) # include MPI rank and functional description into proctitle task_name = 'root' if evnt.rank == 0 else 'filtering' @@ -1012,13 +1102,14 @@ with ctx: evnt.data_readers = data_reader # create single-detector background "estimators" - if args.enable_single_detector_background and evnt.rank == 0: + if analyze_singles and evnt.rank == 0: sngl_estimator = {ifo: LiveSingle.from_cli(args, ifo) for ifo in ifos} # Create double coincident background estimator for every combo if args.enable_background_estimation and evnt.rank == 0: - ifo_combos = itertools.combinations(ifos, 2) + trigg_ifos = [ifo for ifo in ifos if ifo not in evnt.skymap_only_ifos] + ifo_combos = itertools.combinations(trigg_ifos, 2) estimators = [] for combo in ifo_combos: logging.info('Will calculate %s background', ppdets(combo, "-")) @@ -1081,19 +1172,17 @@ with ctx: else: psd_count[ifo] -= 1 - if data_reader[ifo].psd is not None: - dist = data_reader[ifo].psd.dist - if dist < args.min_psd_abort_distance or dist > args.max_psd_abort_distance: - logging.info("%s PSD dist %s outside acceptable range [%s, %s]", - ifo, dist, args.min_psd_abort_distance, - args.max_psd_abort_distance) - status = False + status &= data_reader[ifo].check_psd_dist( + args.min_psd_abort_distance, + args.max_psd_abort_distance + ) if status is True: - evnt.live_detectors.add(ifo) - if evnt.rank > 0: - logging.info('Filtering %s', ifo) - results[ifo] = mf.process_data(data_reader[ifo]) + if ifo not in evnt.skymap_only_ifos: + evnt.live_detectors.add(ifo) + if evnt.rank > 0: + logging.info('Filtering %s', ifo) + results[ifo] = mf.process_data(data_reader[ifo]) else: logging.info('Insufficient data for %s analysis', ifo) @@ -1152,7 +1241,7 @@ with ctx: evnt.check_coincs(list(results.keys()), best_coinc, psds) # Check for singles - if args.enable_single_detector_background: + if analyze_singles: evnt.check_singles(results, psds) gates = {ifo: data_reader[ifo].gate_params for ifo in data_reader} diff --git a/bin/pycbc_losc_segment_query b/bin/pycbc_losc_segment_query index 9476349bc62..4403af29168 100644 --- a/bin/pycbc_losc_segment_query +++ b/bin/pycbc_losc_segment_query @@ -41,8 +41,8 @@ def query_gwosc(ifo, segment_name, gps_start_time, duration): """ response = urlopen( - 'https://www.gw-openscience.org/timeline/segments/json/O1/{}_{}/{}/{}/'.format( - ifo, segment_name, gps_start_time, duration)) + f'https://www.gwosc.org/timeline/segments/json/O1/{ifo}_{segment_name}/{gps_start_time}/{duration}/' + ) 
logging.info(response.info()) json_segment_data = json.loads(response.read()) diff --git a/bin/pycbc_optimize_snr b/bin/pycbc_optimize_snr index 8c6b05059d1..f6c51c7b135 100755 --- a/bin/pycbc_optimize_snr +++ b/bin/pycbc_optimize_snr @@ -15,10 +15,7 @@ from pycbc import ( from pycbc.types import MultiDetOptionAction, zeros, load_frequencyseries from pycbc.filter import matched_filter_core import pycbc.waveform.bank -from pycbc.conversions import ( - mchirp_from_mass1_mass2, mtotal_from_mchirp_eta, - mass1_from_mtotal_eta, mass2_from_mtotal_eta -) +import pycbc.conversions as cv from pycbc.io import live from pycbc.io.hdf import load_hdf5_to_dict from pycbc.detector import Detector @@ -29,6 +26,16 @@ try: except: ps = None +# Set a minimum mass for points tried in optimization allowing for +# minimal slop relative to the lightest template +MIN_CPT_MASS = 0.99 + +# Set a large maximum total mass +MAX_MTOTAL = 500. + +# Set a notional maximum and minimum possible eta +MAX_ETA = 0.24999999999 +MIN_ETA = 0.01 Nfeval = 0 start_time = time.time() @@ -56,12 +63,12 @@ def compute_network_snr_core(v, *argv, raise_err=False): delta_f = argv[7] sample_rate = argv[8] distance = 1.0 / DYN_RANGE_FAC - mtotal = mtotal_from_mchirp_eta(v[0], v[1]) - mass1 = mass1_from_mtotal_eta(mtotal, v[1]) - mass2 = mass2_from_mtotal_eta(mtotal, v[1]) + mtotal = cv.mtotal_from_mchirp_eta(v[0], v[1]) + mass1 = cv.mass1_from_mtotal_eta(mtotal, v[1]) + mass2 = cv.mass2_from_mtotal_eta(mtotal, v[1]) # enforce broadly accepted search space boundaries - if mass1 < 1 or mass2 < 1 or mtotal > 500: + if mass1 < MIN_CPT_MASS or mass2 < MIN_CPT_MASS or mtotal > MAX_MTOTAL: return -numpy.inf, {} try: @@ -70,7 +77,7 @@ def compute_network_snr_core(v, *argv, raise_err=False): approximant=approximant, mass1=mass1, mass2=mass2, spin1z=v[2], spin2z=v[3], f_lower=flow, f_final=f_end, delta_f=delta_f, - delta_t=1.0/sample_rate, distance=distance) + delta_t=1./sample_rate, distance=distance) except RuntimeError: if raise_err: raise @@ -119,8 +126,8 @@ def compute_minus_network_snr(v, *argv): def compute_minus_network_snr_pso(v, *argv, **kwargs): argv = kwargs['args'] - nsnr_array = numpy.array([-compute_network_snr_core(v_i, *argv)[0] for v_i in v]) - return nsnr_array + nsnr_array = numpy.array([compute_network_snr_core(v_i, *argv)[0] for v_i in v]) + return -nsnr_array def optimize_di(bounds, cli_args, extra_args): @@ -224,6 +231,10 @@ parser.add_argument('--approximant', required=True, parser.add_argument('--snr-threshold', default=4.0, help='If the SNR in ifo X is below this threshold do not ' 'consider it part of the coincidence. Not implemented') +parser.add_argument('--chirp-time-f-lower', default=20., + help='Starting frequency for chirp time window (Hz).') +parser.add_argument('--chirp-time-window', default=2., + help='Chirp time window (s).') parser.add_argument('--gracedb-server', metavar='URL', help='URL of GraceDB server API for uploading events. 
' 'If not provided, the default URL is used.') @@ -327,21 +338,41 @@ sample_rate = fp['sample_rate'][()] extra_args = [data, coinc_times, coinc_ifos, flen, approximant, flow, f_end, delta_f, sample_rate] -mchirp = mchirp_from_mass1_mass2( - fp['mass1'][()], - fp['mass2'][()] -) -minchirp = mchirp * (1 - mchirp / 50.0) -maxchirp = mchirp * (1 + mchirp / 50.0) -minchirp = 1 if minchirp < 1 else minchirp -maxchirp = 80 if maxchirp > 80 else maxchirp - -# boundary of the optimization space (dict of (min, max) tuples) +# Determine chirp mass bounds from constant chirp time window +tau0flow = args.chirp_time_f_lower +tau0 = cv.tau0_from_mass1_mass2(fp['mass1'][()], fp['mass2'][()], tau0flow) +# Arbitrarily set max mchirp to 200 Msun if tau0 - window is too small +mintau0 = max(tau0 - args.chirp_time_window, + cv.tau0_from_mchirp(200., tau0flow)) +maxtau0 = tau0 + args.chirp_time_window +minchirp = cv.mchirp_from_tau0(maxtau0, tau0flow) +maxchirp = cv.mchirp_from_tau0(mintau0, tau0flow) +# Check basic sanity +assert minchirp > 0.5 +assert minchirp < maxchirp +assert maxchirp < 250. + +# Establish minimum eta: find the most asymmetric mass point +# nb function name is 'mass2' but it doesn't enforce m2 < m1! +maxm1 = cv._mass2_from_mchirp_mass1(maxchirp, MIN_CPT_MASS) +mineta = max(cv.eta_from_mass1_mass2(maxm1, MIN_CPT_MASS), MIN_ETA) + +# Use max chirp and max eta to find the upper bound on mass2 +maxm2 = cv.mass2_from_mchirp_eta(maxchirp, MAX_ETA) + +# Astrophysical spin bounds dependent on system masses - https://inspirehep.net/literature/1124003 +# If masses can be > 3 then allow them BH spin range +minspin1z = -0.4 if maxm1 < 3 else -0.9 +maxspin1z = 0.4 if maxm1 < 3 else 0.9 +minspin2z = -0.4 if maxm2 < 3 else -0.9 +maxspin2z = 0.4 if maxm2 < 3 else 0.9 + +# Boundary of the optimization space bounds = { 'mchirp': (minchirp, maxchirp), - 'eta': (0.01, 0.2499), - 'spin1z': (-0.9, 0.9), - 'spin2z': (-0.9, 0.9) + 'eta': (mineta, MAX_ETA), + 'spin1z': (minspin1z, maxspin1z), + 'spin2z': (minspin2z, maxspin2z) } with scheme_context: @@ -360,9 +391,9 @@ with scheme_context: _, snr_series_dict = compute_network_snr_core(opt_params, *extra_args) -mtotal = mtotal_from_mchirp_eta(opt_params[0], opt_params[1]) -mass1 = mass1_from_mtotal_eta(mtotal, opt_params[1]) -mass2 = mass2_from_mtotal_eta(mtotal, opt_params[1]) +mtotal = cv.mtotal_from_mchirp_eta(opt_params[0], opt_params[1]) +mass1 = cv.mass1_from_mtotal_eta(mtotal, opt_params[1]) +mass2 = cv.mass2_from_mtotal_eta(mtotal, opt_params[1]) spin1z = opt_params[2] spin2z = opt_params[3] diff --git a/companion.txt b/companion.txt index 770ed63a7ca..5eb0fa72771 100644 --- a/companion.txt +++ b/companion.txt @@ -6,7 +6,7 @@ healpy # Needed for GraceDB uploads and skymap generation ligo-gracedb>=2.10.0 -ligo.skymap +ligo.skymap!=1.1.0 # Needed for ringdown pykerr diff --git a/docker/etc/docker-install.sh b/docker/etc/docker-install.sh index 68d31814e96..72246631395 100755 --- a/docker/etc/docker-install.sh +++ b/docker/etc/docker-install.sh @@ -11,11 +11,11 @@ mkdir -p /opt/pycbc/src cp -a /scratch /opt/pycbc/src/pycbc chmod a+rw /dev/fuse mount /cvmfs/config-osg.opensciencegrid.org -mount /cvmfs/oasis.opensciencegrid.org +mount /cvmfs/software.igwn.org mkdir -p /opt/pycbc/pycbc-software/share/lal-data -rsync --exclude='SEOBNRv1ROM*' --exclude='SEOBNRv2ROM_DS_HI_v1.0.hdf5' --exclude='NRSur4d2s_FDROM.hdf5' -ravz /cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation/ /opt/pycbc/pycbc-software/share/lal-data/ +rsync 
--exclude='SEOBNRv1ROM*' --exclude='SEOBNRv2ROM_DS_HI_v1.0.hdf5' --exclude='NRSur4d2s_FDROM.hdf5' -ravz /cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation/ /opt/pycbc/pycbc-software/share/lal-data/ umount /cvmfs/config-osg.opensciencegrid.org -umount /cvmfs/oasis.opensciencegrid.org +umount /cvmfs/software.igwn.org chown -R 1000:1000 /opt/pycbc/src /opt/pycbc/pycbc-software chmod -R u=rwX,g=rX,o=rX /opt/pycbc exit 0 diff --git a/docs/_include/inference_example_lisa_smbhb.sh b/docs/_include/inference_example_lisa_smbhb.sh deleted file mode 100644 index 8376b58200b..00000000000 --- a/docs/_include/inference_example_lisa_smbhb.sh +++ /dev/null @@ -1,3 +0,0 @@ -sh ../../examples/inference/lisa_smbhb/get.sh -sh ../../examples/inference/lisa_smbhb/run.sh -python ../../examples/inference/lisa_smbhb/advanced_plot.py diff --git a/docs/building_bundled_executables.rst b/docs/building_bundled_executables.rst index d8d014047c6..612711b4eed 100644 --- a/docs/building_bundled_executables.rst +++ b/docs/building_bundled_executables.rst @@ -96,7 +96,7 @@ The script executes ``pycbc_inspiral`` as part of the build process. This may require LAL data at build time. The LAL data can be given with the command line argument:: - --with-lal-data-path=/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation + --with-lal-data-path=/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation The default command line arguments clone PyCBC from the standard GitHub repository. If you would like to build a bundle using code from your own @@ -130,6 +130,6 @@ Building Releases for CVMFS To build a release of ``pycbc_inspiral`` for installation in CVMFS, run the script with the arguments:: - pycbc_build_eah.sh --lalsuite-commit=a3a5a476d33f169b8749e2840c306a48df63c936 --pycbc-commit=b68832784969a47fe2658abffb3888ee06cd1be4 --with-extra-libs=file:///home/pycbc/build/composer_xe_2015.0.090.tar.gz --with-lal-data-path=/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation + pycbc_build_eah.sh --lalsuite-commit=a3a5a476d33f169b8749e2840c306a48df63c936 --pycbc-commit=b68832784969a47fe2658abffb3888ee06cd1be4 --with-extra-libs=file:///home/pycbc/build/composer_xe_2015.0.090.tar.gz --with-lal-data-path=/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation changing the ``--lalsuite-commit``, ``--pycbc-commit``, and ``--with-lal-data-path`` options to the values for the release. diff --git a/docs/conf.py b/docs/conf.py index 3a5b989256c..bfb74726f45 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -11,7 +11,7 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys, os +import os import pycbc.version import subprocess import glob @@ -263,9 +263,10 @@ # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'http://docs.python.org/': None, - 'h5py': ('http://docs.h5py.org/en/stable/', None), - } +intersphinx_mapping = { + 'python': ('http://docs.python.org/', None), + 'h5py': ('http://docs.h5py.org/en/stable/', None), +} napoleon_use_ivar = False diff --git a/docs/dataquality.rst b/docs/dataquality.rst index 2d7cdc64cf9..ac3aa14cef5 100644 --- a/docs/dataquality.rst +++ b/docs/dataquality.rst @@ -27,8 +27,8 @@ Finding times of hardware injections What flags can I query? ======================== -A list of many of the flags which can be quiered is `available here `_. 
Instead, just give the -raw name such as "DATA" instead of "H1_DATA". +A list of many of the flags which can be queried is `available here `_. +Instead, just give the raw name such as "DATA" instead of "H1_DATA". There are two additional types of flags which can be queried. These are the negation of the flags like "NO_CBC_HW_INJ". The flag "CBC_HW_INJ" would diff --git a/docs/inference.rst b/docs/inference.rst index 7ae2b8a7c8d..153e53ed124 100644 --- a/docs/inference.rst +++ b/docs/inference.rst @@ -513,7 +513,7 @@ Examples inference/examples/single.rst inference/examples/relative.rst inference/examples/hierarchical.rst - /inference/examples/lisa_smbhb.rst + inference/models.rst =============================================== Visualizing the Posteriors diff --git a/docs/inference/examples/gw150914.rst b/docs/inference/examples/gw150914.rst index 36dfc5cd0c6..2ae360c57e7 100644 --- a/docs/inference/examples/gw150914.rst +++ b/docs/inference/examples/gw150914.rst @@ -9,12 +9,12 @@ as was :ref:`used for the simulated BBH example`. We only data configuration file, so that we will run on real gravitational-wave data. First, we need to download the data from the `Gravitational Wave Open Science -Center `_. Run: .. code-block:: bash - wget https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW150914/v3/H-H1_GWOSC_16KHZ_R1-1126257415-4096.gwf - wget https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW150914/v3/L-L1_GWOSC_16KHZ_R1-1126257415-4096.gwf + wget https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW150914/v3/H-H1_GWOSC_16KHZ_R1-1126257415-4096.gwf + wget https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW150914/v3/L-L1_GWOSC_16KHZ_R1-1126257415-4096.gwf This will download the appropriate data ("frame") files to your current working directory. You can now use the following data configuration file: diff --git a/docs/inference/examples/lisa_smbhb.rst b/docs/inference/examples/lisa_smbhb.rst deleted file mode 100644 index 2ceffbd5e71..00000000000 --- a/docs/inference/examples/lisa_smbhb.rst +++ /dev/null @@ -1,68 +0,0 @@ ----------------------------------------------- -LISA parameter estimation for simulated SMBHB ----------------------------------------------- - -This example shows how to use PyCBC for parameter estimation of supermassive black hole binaries (SMBHB) -in LISA mock data. The `data `_ are generated from -`LISA Data Challenge 2a: Sangria `_, -and `BBHx `_ package is used to generate the ``IMRPhenomD`` template and calculate -the corresponding TDI response for LISA. Relative binning (heterodyned likelihood) -is used during sampling to speed up the computation of likelihood functions. Before doing parameter estimation, -you need to install `BBHx `_ and `the corresponding PyCBC waveform plugin `_, -please click the corresponding link to see the detailed description of the installation. - -First, we create the following configuration file, here we just set chirp mass, mass ratio and tc as variable parameters, -`tc`, `eclipticlongitude`, `eclipticlatitude` and `polarization` are defined in the LISA frame: - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/lisa_smbhb_relbin.ini :language: ini - -:download:`Download <../../../examples/inference/lisa_smbhb/lisa_smbhb_relbin.ini>` - -By setting the model name to ``relative`` we are using -:py:class:`Relative ` model.
- -In this simple example, we do the parameter estimation for the first SMBHB signal in the LDC Sangria dataset -(you can also run parameter estimation for other SMBHB signals by choosing appropriate prior range), -we need download the data first (`MBHB_params_v2_LISA_frame.pkl` contains all the true parameters): - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/get.sh :language: bash - -:download:`Download <../../../examples/inference/lisa_smbhb/get.sh>` - -Now run: - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/run.sh :language: bash - -:download:`Download <../../../examples/inference/lisa_smbhb/run.sh>` - -This will run the ``dynesty`` sampler. When it is done, you will have a file called -``lisa_smbhb.hdf`` which contains the results. It should take about three minutes to -run. - -To plot the posterior distribution after the last iteration, you can run the following simplified script: - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/plot.sh :language: bash - -:download:`Download <../../../examples/inference/lisa_smbhb/plot.sh>` - -Or you can run the advanced one: - -.. literalinclude:: ../../../examples/inference/lisa_smbhb/advanced_plot.py :language: python - -:download:`Download <../../../examples/inference/lisa_smbhb/advanced_plot.py>` - -You can modify this advanced plot script to generate the posterior of any SMBHB signals in the LDC Sangria dataset. -In this example it will create the following plot: - -.. image:: ../../_include/lisa_smbhb_mass_tc_0.png :scale: 60 :align: center - -The scatter points show each walker's position after the last iteration. The -points are colored by the SNR at that point, with the 50th and 90th -percentile contours drawn. The red lines represent the true parameters of injected signal. diff --git a/docs/inference/models.rst b/docs/inference/models.rst new file mode 100644 index 00000000000..a3aa15746d2 --- /dev/null +++ b/docs/inference/models.rst @@ -0,0 +1,212 @@ +.. _models_detailed: + +--------------------------------------------- +Details of common Models in PyCBC Inference +--------------------------------------------- + +Commonly used likelihood models are compared below. PyCBC Inference +also has the capability to interface with external models. See the +`interactive tutorial `_ if you'd like to learn how to add your own +model for handling a new problem such as GRBs, kilonovae, or whatever you can +imagine. + +============================================== +Standard models with full waveform generation +============================================== + +.. card:: Gaussian Noise + + ``'gaussian_noise'`` :py:class:`pycbc.inference.models.gaussian_noise.GaussianNoise` + + The `gaussian_noise` model is one of the most generic models in + PyCBC Inference. It is designed for gravitational-wave data that is + assumed to be Gaussian and stationary. Otherwise, there are no + restrictions on the types of waveform models that may be used. + Waveform models supported by either `get_fd_waveform` or + `get_td_waveform` can be used as waveform models. Use this model only if + none of the other, more optimized models fits your specific problem. Because + this model is generic, it is often slower than the other, more + specialized models. + + Supported Marginalizations: None + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:✅ + :ref:`Example ` + +..
card:: Marginalized Phase + + ``'marginalized_phase'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedPhaseGaussianNoise` + + + The `marginalized_phase` model is a straightforward extension to + the `gaussian_noise` model that marginalizes the signal over an overall + phase. This can account for the orbital phase of a gravitational-wave + signal which is usually a nuisance parameter. However, this + implementation is only correct for signals that only include the + dominant gravitational-wave mode (or 22 mode). This is because each + mode has a different functional dependence on the orbital phase. + + + Supported Marginalizations: coa_phase (automatic, dominant-mode) + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:❌ + +.. card:: Marginalized Polarization + + ``'marginalized_polarization'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedPolarization` + + + The `marginalized_polarization` model is a straightforward extension to + the `gaussian_noise` model that marginalizes the signal over + the polarization angle of the gravitational-wave signal. This is + done by filtering both the plus and cross polarizations separately + and then numerically integrating the resulting inner products over + a grid of polarization angle values. The density of the grid + is selectable. Additional marginalizations can also be optionally enabled. + This model is often used in preference to the `marginalized_phase` model + because it can support waveform models with higher order modes. + + Supported Marginalizations: polarization (automatic), coa_phase (dominant-mode), distance + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:✅ + +.. card:: Marginalized Time + + ``'marginalized_time'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedTime` + + + The `marginalized_time` model calculates a time series of inner products + between the signal model and data so that the likelihood can be + numerically marginalized over. The marginalization uses a weighted + Monte Carlo which samples the most likely regions of the time series + more densely. The time series is also sub-sample interpolated. Higher + mode waveforms are also permitted as both the plus and cross polarizations + are explicitly handled separately. The time marginalization can also be + done in concert with sky and various intrinsic marginalizations. + + Supported Marginalizations: tc (time), distance, coa_phase (dominant mode), polarization, ra dec + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:✅ + :ref:`Example ` + +.. card:: Marginalized Higher Mode Phase + + ``'marginalized_hmpolphase'`` :py:class:`pycbc.inference.models.marginalized_gaussian_noise.MarginalizedHMPolPhase` + + + The `marginalized_hmpolphase` model numerically marginalizes the likelihood + over a grid of both polarization values and orbital phase values. It + explicitly handles this for higher-order-mode waveforms by calculating + all inner products between the data and signal on a mode-by-mode basis. + The likelihoods are then assembled for each polarization and phase value + and numerically integrated over. This model can only be used for + waveform approximants which support the PyCBC higher mode interface + as we need to be able to calculate each mode separately. + + Supported Marginalizations: polarization (automatic), coa_phase (automatic) + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:✅ + +========================================= +Heterodyne / Relative Models +========================================= + +..
card:: Relative Model + + ``'relative'`` :py:class:`pycbc.inference.models.relbin.Relative` + + The `relative` model uses a reference signal provided in its + configuration to expand the likelihood in terms of differences between + the reference signal and a target signal. If the reference is close to the + peak in the likelihood, the reasonable portions of parameter space to + explore will only have small phase deviations from the reference signal + (i.e. several cycles). This allows us to represent the ratio between + the target signal and our reference using a piece-wise linear approximation. + The target signal only needs to be calculated at the edges of this approximation. + This model thus only supports waveforms which can efficiently generate + a signal only at a given set of frequency values. Higher order modes + are not recommended due to their possible violation of the approximation + that the ratio between target and reference signals is a slowly varying + smooth function. In this case, use the `multi_signal` model and + use a `relative` model for each mode. Where the model approximations hold, + use this in preference + to the models that need to generate a full waveform for every likelihood + evaluation, as it will usually be much faster. + + Supported Marginalizations: distance, coa_phase (dominant mode), polarization + +++ + Earth Rotation:✅ LISA:✅ Higher Modes:❌ + :ref:`Example ` + +.. card:: Relative Time + + ``'relative_time'`` :py:class:`pycbc.inference.models.relbin.RelativeTime` + + The `relative_time` model extends the `relative` model by calculating + the likelihood for a grid of possible merger times. A weighted Monte Carlo + is performed to integrate over the merger time. Sub-sample interpolation + of the time series is performed. Sky location marginalization, among others, + can be done in concert. + + Supported Marginalizations: distance, coa_phase (dominant mode), polarization, ra dec + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:❌ + +.. card:: Relative Time Dominant-mode Only + + ``'relative_time_dom'`` :py:class:`pycbc.inference.models.relbin.RelativeTimeDom` + + The `relative_time_dom` model further specializes the model to completely + preclude use with higher order mode signals. In exchange for this restriction, it + gains the ability to marginalize over inclination. + + Supported Marginalizations: distance, coa_phase (dominant mode), polarization, inclination, ra dec + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:❌ + + +========================================= +Extrinsic Parameter Only Models +========================================= + +.. card:: Single Template + + ``'single_template'`` :py:class:`pycbc.inference.models.single_template.SingleTemplate` + + The `single_template` model is only for extrinsic parameter estimation, e.g. + sky location, distance, inclination, etc. It speeds up parameter estimation + by completely avoiding waveform generation while calculating likelihoods. + A single reference signal is generated with fixed intrinsic (masses, spins, etc.) + parameters. The likelihoods can then be precalculated up to constant factors + which vary with the extrinsic parameters. Only dominant-mode signals are supported + as the plus and cross polarizations are assumed to be different only by + a phase. With this model, all supported parameters, or any subset of them, + may be marginalized over.
+ + Supported Marginalizations: tc (time), distance, coa_phase (dominant mode), polarization, ra dec + +++ + Earth Rotation:❌ LISA:❌ Higher Modes:❌ + :ref:`Example ` + +================================================= +Composite Models +================================================= + +.. card:: Hierarchical + + :py:class:`pycbc.inference.models.hierarchical.HierarchicalModel` + + The hierarchical model is a container for submodels. Each submodel + makes an independent contribution to an overall likelihood function. + + See :ref:`the full page on the hierarchical model `. + +.. card:: Multiple Signal + + ``'multi_signal'`` :py:class:`pycbc.inference.models.hierarchical.MultiSignalModel` + + This is a container for submodels where each model shares use of the same + data and hence there may be cross terms between the several signals. + This model requires support from the submodel to handle cross-term + calculations. Some supported models include the `gaussian_noise`, `relative` + and `single_template`. diff --git a/docs/install.rst b/docs/install.rst index f5b85aee832..9f9c308be74 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -70,12 +70,12 @@ available in a 'IGWN Conda' environment. To see what environments are available This should yield ``igwn-py37`` as one choice. The output of this command will also tell you the location of the environment in the file system. Then, the location of the -python3.7 executable is for instance ``/cvmfs/oasis.opensciencegrid.org/ligo/sw/conda/envs/igwn-py37/bin/python`` +python3.7 executable is for instance ``/cvmfs/software.igwn.org/conda/envs/igwn-py37/bin/python`` and you will create the virtualenv via the command .. code-block:: bash - virtualenv -p /cvmfs/oasis.opensciencegrid.org/ligo/sw/conda/envs/igwn-py37/bin/python env + virtualenv -p /cvmfs/software.igwn.org/conda/envs/igwn-py37/bin/python env Once the virtualenv has been created you can install PyCBC from PyPI or a local copy with the `[igwn]` extra specifier to install the optional extra requirements diff --git a/docs/install_lalsuite.rst b/docs/install_lalsuite.rst index 438b3b358bb..52d7e340925 100644 --- a/docs/install_lalsuite.rst +++ b/docs/install_lalsuite.rst @@ -146,7 +146,7 @@ run the command .. code-block:: bash - echo 'export LAL_DATA_PATH=/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation' >> $VIRTUAL_ENV/bin/activate + echo 'export LAL_DATA_PATH=/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation' >> $VIRTUAL_ENV/bin/activate to add the appropriate path to your virtual environment's ``activate`` script. Then deactivate and activate your virtual environment. diff --git a/docs/pycbc_condition_strain.rst b/docs/pycbc_condition_strain.rst index c4c6b9ba68e..001fcd86b1a 100644 --- a/docs/pycbc_condition_strain.rst +++ b/docs/pycbc_condition_strain.rst @@ -115,4 +115,4 @@ might read more data than what specified by the ``--gps-start-time`` and value that causes ``pycbc_condition_strain`` to request data outside the range of availability. -.. _GWOSC: https://www.gw-openscience.org/about/ +.. _GWOSC: https://www.gwosc.org/about/ diff --git a/docs/workflow/pycbc_make_coinc_search_workflow.rst b/docs/workflow/pycbc_make_coinc_search_workflow.rst index 93f8f8ad9ac..d83271389df 100644 --- a/docs/workflow/pycbc_make_coinc_search_workflow.rst +++ b/docs/workflow/pycbc_make_coinc_search_workflow.rst @@ -1004,8 +1004,8 @@ locations in CVMFS.
You will also need to specify where the code should get the data needed to generate reduced order model waveforms. To do this add the following additional arguments to ``pycbc_submit_dax``:: - --append-site-profile 'local:env|LAL_DATA_PATH:/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation' \ - --append-site-profile 'osg:env|LAL_DATA_PATH:/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation' \ + --append-site-profile 'local:env|LAL_DATA_PATH:/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation' \ + --append-site-profile 'osg:env|LAL_DATA_PATH:/cvmfs/software.igwn.org/pycbc/lalsuite-extra/current/share/lalsimulation' \ Here, ``current`` is a symbolic link to the latest version of the data and can be replaced with a specific release (e.g. ``e02dab8c``) if required. diff --git a/examples/gw150914/audio.py b/examples/gw150914/audio.py index 1b63af981ef..8b82a85cb5b 100644 --- a/examples/gw150914/audio.py +++ b/examples/gw150914/audio.py @@ -9,7 +9,7 @@ # Read data and remove low frequency content fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf' -url = "https://www.gw-openscience.org/GW150914data/" + fname +url = "https://www.gwosc.org/GW150914data/" + fname urlretrieve(url, filename=fname) h1 = highpass_fir(read_frame(fname, 'H1:LOSC-STRAIN'), 15.0, 8) diff --git a/examples/gw150914/gw150914_h1_snr.py b/examples/gw150914/gw150914_h1_snr.py index 9a37f85d25d..2360e702827 100644 --- a/examples/gw150914/gw150914_h1_snr.py +++ b/examples/gw150914/gw150914_h1_snr.py @@ -8,7 +8,7 @@ # Read data and remove low frequency content fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf' -url = "https://www.gw-openscience.org/GW150914data/" + fname +url = "https://www.gwosc.org/GW150914data/" + fname urlretrieve(url, filename=fname) h1 = read_frame('H-H1_LOSC_4_V2-1126259446-32.gwf', 'H1:LOSC-STRAIN') h1 = highpass_fir(h1, 15, 8) diff --git a/examples/inference/data/o1.ini b/examples/inference/data/o1.ini index cf0bd7ebf5a..64924323f48 100644 --- a/examples/inference/data/o1.ini +++ b/examples/inference/data/o1.ini @@ -23,7 +23,7 @@ psd-segment-stride = 4 ; with a ligo-data-server, you can use the frame-type argument to automatically ; locate the location of the frame files containing the data. If you are not ; running on one of those computers, download the necessary data from GWOSC -; (gw-openscience.org), remove the frame-type argument, and uncomment +; (gwosc.org), remove the frame-type argument, and uncomment ; frame-files, pointing the latter to the files you downloaded. ;frame-files = H1:/PATH/TO/DOWNLOADED/H1FRAME.gwf L1:/PATH/TO/DOWNLOADED/L1FRAME.gwf frame-type = H1:H1_LOSC_16_V1 L1:L1_LOSC_16_V1 diff --git a/examples/inference/data/o2.ini b/examples/inference/data/o2.ini index ebe71cfb8b2..873e5034149 100644 --- a/examples/inference/data/o2.ini +++ b/examples/inference/data/o2.ini @@ -24,7 +24,7 @@ psd-segment-stride = 4 ; with a ligo-data-server, you can use the frame-type argument to automatically ; locate the location of the frame files containing the data. If you are not ; running on one of those computers, download the necessary data from GWOSC -; (gw-openscience.org), remove the frame-type argument, and uncomment +; (gwosc.org), remove the frame-type argument, and uncomment ; frame-files, pointing the latter to the files you downloaded. 
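+; For example, the GW170817 frame files referenced elsewhere in this repository
+; can be downloaded from GWOSC, e.g.
+; https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW170817/v3/H-H1_GWOSC_4KHZ_R1-1187006835-4096.gwf
+; (choose the release and sampling rate appropriate for this configuration).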
frame-type = H1:H1_GWOSC_O2_16KHZ_R1 L1:L1_GWOSC_O2_16KHZ_R1 V1:V1_GWOSC_O2_16KHZ_R1 ;frame-files = H1:/PATH/TO/DOWNLOADED/H1FRAME.gwf L1:/PATH/TO/DOWNLOADED/L1FRAME.gwf V1:/PATH/TO/DOWNLOADED/V1FRAME.gwf diff --git a/examples/inference/margtime/get.sh b/examples/inference/margtime/get.sh index c8f9ece6359..54bc8f0abf1 100644 --- a/examples/inference/margtime/get.sh +++ b/examples/inference/margtime/get.sh @@ -4,5 +4,6 @@ for ifo in H-H1 L-L1 do file=${ifo}_GWOSC_4KHZ_R1-1126257415-4096.gwf test -f ${file} && continue - curl -O --show-error --silent https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW150914/v3/${ifo}_GWOSC_4KHZ_R1-1126257415-4096.gwf + curl -O -L --show-error --silent \ + https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW150914/v3/${ifo}_GWOSC_4KHZ_R1-1126257415-4096.gwf done diff --git a/examples/live/check_results.py b/examples/live/check_results.py index 413e643ac62..31089efbe28 100755 --- a/examples/live/check_results.py +++ b/examples/live/check_results.py @@ -11,6 +11,7 @@ from pycbc.io.ligolw import LIGOLWContentHandler from ligo.lw.utils import load_filename as load_xml_doc from ligo.lw import lsctables +from pycbc import conversions as conv def close(a, b, c): @@ -130,7 +131,8 @@ def check_found_events(args): # create field array to store properties of triggers dtype = [('mass1', float), ('mass2', float), ('spin1z', float), ('spin2z', float), - ('tc', float), ('net_snr', float)] + ('tc', float), ('net_snr', float), + ('ifar', float)] trig_props = FieldArray(n_found, dtype=dtype) # store properties of found triggers @@ -139,18 +141,21 @@ def check_found_events(args): xmldoc = load_xml_doc( ctrigfp, False, contenthandler=LIGOLWContentHandler) si_table = lsctables.SnglInspiralTable.get_table(xmldoc) + ci_table = lsctables.CoincInspiralTable.get_table(xmldoc) trig_props['tc'][x] = si_table[0].end trig_props['mass1'][x] = si_table[0].mass1 trig_props['mass2'][x] = si_table[0].mass2 trig_props['spin1z'][x] = si_table[0].spin1z trig_props['spin2z'][x] = si_table[0].spin2z + trig_props['ifar'][x] = conv.sec_to_year(1 / ci_table[0].combined_far) snr_list = si_table.getColumnByName('snr').asarray() trig_props['net_snr'][x] = sum(snr_list ** 2) ** 0.5 log.info('Single-detector SNRs: %s', snr_list) log.info('Network SNR: %f', trig_props['net_snr'][x]) + log.info('IFAR: %f', trig_props['ifar'][x]) log.info('Merger time: %f', trig_props['tc'][x]) log.info('Mass 1: %f', trig_props['mass1'][x]) log.info('Mass 2: %f', trig_props['mass2'][x]) diff --git a/examples/live/run.sh b/examples/live/run.sh index 249382f8296..3cda2773664 100755 --- a/examples/live/run.sh +++ b/examples/live/run.sh @@ -195,12 +195,11 @@ python -m mpi4py `which pycbc_live` \ --src-class-eff-to-lum-distance 0.74899 \ --src-class-lum-distance-to-delta -0.51557 -0.32195 \ --run-snr-optimization \ ---enable-single-detector-background \ +--sngl-ifar-est-dist conservative \ --single-newsnr-threshold 9 \ --single-duration-threshold 7 \ --single-reduced-chisq-threshold 2 \ --single-fit-file single_trigger_fits.hdf \ ---sngl-ifar-est-dist conservative \ --verbose # note that, at this point, some SNR optimization processes may still be diff --git a/examples/multi_inspiral/run.sh b/examples/multi_inspiral/run.sh index 30621131ddf..cbbc9bd131d 100755 --- a/examples/multi_inspiral/run.sh +++ b/examples/multi_inspiral/run.sh @@ -3,11 +3,11 @@ CONFIG_URL=https://github.com/gwastro/pycbc-config/raw/master/test/multi_inspiral BANK_FILE=gw170817_single_template.hdf BANK_VETO_FILE=bank_veto_bank.xml 
-H1_FRAME=https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW170817/v3/H-H1_GWOSC_4KHZ_R1-1187006835-4096.gwf +H1_FRAME=https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW170817/v3/H-H1_GWOSC_4KHZ_R1-1187006835-4096.gwf H1_CHANNEL=GWOSC-4KHZ_R1_STRAIN L1_FRAME=https://dcc.ligo.org/public/0144/T1700406/003/L-L1_CLEANED_HOFT_C02_T1700406_v3-1187008667-4096.gwf L1_CHANNEL=DCH-CLEAN_STRAIN_C02_T1700406_v3 -V1_FRAME=https://www.gw-openscience.org/eventapi/html/GWTC-1-confident/GW170817/v3/V-V1_GWOSC_4KHZ_R1-1187006835-4096.gwf +V1_FRAME=https://www.gwosc.org/eventapi/html/GWTC-1-confident/GW170817/v3/V-V1_GWOSC_4KHZ_R1-1187006835-4096.gwf V1_CHANNEL=GWOSC-4KHZ_R1_STRAIN echo -e "\\n\\n>> [`date`] Getting template bank" diff --git a/pycbc/__init__.py b/pycbc/__init__.py index b7a2d4aad27..53c35debfff 100644 --- a/pycbc/__init__.py +++ b/pycbc/__init__.py @@ -158,7 +158,7 @@ def makedir(path): try: import pycbc.fft.mkl HAVE_MKL=True -except ImportError: +except (ImportError, OSError): HAVE_MKL=False # Check for openmp suppport, currently we pressume it exists, unless on diff --git a/pycbc/catalog/catalog.py b/pycbc/catalog/catalog.py index 6f69c76f8ba..0f5ada0d92f 100644 --- a/pycbc/catalog/catalog.py +++ b/pycbc/catalog/catalog.py @@ -32,7 +32,7 @@ # FIXME with posteriors when available and we can just post-process that # LVC catalogs -base_lvc_url = "https://www.gw-openscience.org/eventapi/jsonfull/{}/" +base_lvc_url = "https://www.gwosc.org/eventapi/jsonfull/{}/" _catalogs = {'GWTC-1-confident': 'LVC', 'GWTC-1-marginal': 'LVC', 'Initial_LIGO_Virgo': 'LVC', diff --git a/pycbc/conversions.py b/pycbc/conversions.py index 3cadef1f2d9..d4c67ecb957 100644 --- a/pycbc/conversions.py +++ b/pycbc/conversions.py @@ -140,12 +140,12 @@ def invq_from_mass1_mass2(mass1, mass2): def eta_from_mass1_mass2(mass1, mass2): """Returns the symmetric mass ratio from mass1 and mass2.""" - return mass1*mass2 / (mass1+mass2)**2. + return mass1*mass2 / (mass1 + mass2)**2. def mchirp_from_mass1_mass2(mass1, mass2): """Returns the chirp mass from mass1 and mass2.""" - return eta_from_mass1_mass2(mass1, mass2)**(3./5) * (mass1+mass2) + return eta_from_mass1_mass2(mass1, mass2)**(3./5) * (mass1 + mass2) def mass1_from_mtotal_q(mtotal, q): @@ -155,7 +155,7 @@ def mass1_from_mtotal_q(mtotal, q): (heavier) mass. If q < 1, the returned mass will be the secondary (lighter) mass. """ - return q*mtotal / (1.+q) + return q*mtotal / (1. + q) def mass2_from_mtotal_q(mtotal, q): @@ -165,7 +165,7 @@ def mass2_from_mtotal_q(mtotal, q): (lighter) mass. If q < 1, the returned mass will be the primary (heavier) mass. """ - return mtotal / (1.+q) + return mtotal / (1. + q) def mass1_from_mtotal_eta(mtotal, eta): @@ -185,7 +185,7 @@ def mass2_from_mtotal_eta(mtotal, eta): def mtotal_from_mchirp_eta(mchirp, eta): """Returns the total mass from the chirp mass and symmetric mass ratio. """ - return mchirp / (eta**(3./5.)) + return mchirp / eta**(3./5.) def mass1_from_mchirp_eta(mchirp, eta): @@ -219,7 +219,7 @@ def _mass2_from_mchirp_mass1(mchirp, mass1): This has 3 solutions but only one will be real. """ a = mchirp**5 / mass1**3 - roots = numpy.roots([1,0,-a,-a*mass1]) + roots = numpy.roots([1, 0, -a, -a * mass1]) # Find the real one real_root = roots[(abs(roots - roots.real)).argmin()] return real_root.real @@ -261,7 +261,7 @@ def _mass_from_knownmass_eta(known_mass, eta, known_is_secondary=False, float The other component mass. 
""" - roots = numpy.roots([eta, (2*eta - 1)*known_mass, eta*known_mass**2.]) + roots = numpy.roots([eta, (2*eta - 1) * known_mass, eta * known_mass**2.]) if force_real: roots = numpy.real(roots) if known_is_secondary: @@ -298,25 +298,28 @@ def eta_from_q(q): Note that the mass ratio may be either < 1 or > 1. """ - return q / (1.+q)**2 + return q / (1. + q)**2 def mass1_from_mchirp_q(mchirp, q): """Returns the primary mass from the given chirp mass and mass ratio.""" - mass1 = (q**(2./5.))*((1.0 + q)**(1./5.))*mchirp + mass1 = q**(2./5.) * (1.0 + q)**(1./5.) * mchirp return mass1 + def mass2_from_mchirp_q(mchirp, q): """Returns the secondary mass from the given chirp mass and mass ratio.""" - mass2 = (q**(-3./5.))*((1.0 + q)**(1./5.))*mchirp + mass2 = q**(-3./5.) * (1.0 + q)**(1./5.) * mchirp return mass2 + def _a0(f_lower): """Used in calculating chirp times: see Cokelaer, arxiv.org:0706.4437 appendix 1, also lalinspiral/python/sbank/tau0tau3.py. """ return 5. / (256. * (numpy.pi * f_lower)**(8./3.)) + def _a3(f_lower): """Another parameter used for chirp times""" return numpy.pi / (8. * (numpy.pi * f_lower)**(5./3.)) @@ -332,6 +335,15 @@ def tau0_from_mtotal_eta(mtotal, eta, f_lower): return _a0(f_lower) / (mtotal**(5./3.) * eta) +def tau0_from_mchirp(mchirp, f_lower): + r"""Returns :math:`\tau_0` from the chirp mass and the given frequency. + """ + # convert to seconds + mchirp = mchirp * lal.MTSUN_SI + # formulae from arxiv.org:0706.4437 + return _a0(f_lower) / mchirp ** (5./3.) + + def tau3_from_mtotal_eta(mtotal, eta, f_lower): r"""Returns :math:`\tau_0` from the total mass, symmetric mass ratio, and the given frequency. @@ -358,6 +370,14 @@ def tau3_from_mass1_mass2(mass1, mass2, f_lower): return tau3_from_mtotal_eta(mtotal, eta, f_lower) +def mchirp_from_tau0(tau0, f_lower): + r"""Returns chirp mass from :math:`\tau_0` and the given frequency. + """ + mchirp = (_a0(f_lower) / tau0) ** (3./5.) 
# in seconds + # convert back to solar mass units + return mchirp / lal.MTSUN_SI + + def mtotal_from_tau0_tau3(tau0, tau3, f_lower, in_seconds=False): r"""Returns total mass from :math:`\tau_0, \tau_3`.""" @@ -1605,6 +1625,7 @@ def nltides_gw_phase_diff_isco(f_low, f0, amplitude, n, m1, m2): 'mass1_from_mass2_eta', 'eta_from_q', 'mass1_from_mchirp_q', 'mass2_from_mchirp_q', 'tau0_from_mtotal_eta', 'tau3_from_mtotal_eta', 'tau0_from_mass1_mass2', + 'tau0_from_mchirp', 'mchirp_from_tau0', 'tau3_from_mass1_mass2', 'mtotal_from_tau0_tau3', 'eta_from_tau0_tau3', 'mass1_from_tau0_tau3', 'mass2_from_tau0_tau3', 'primary_spin', 'secondary_spin', diff --git a/pycbc/dq.py b/pycbc/dq.py index 171f01441bd..3a48b3eee29 100644 --- a/pycbc/dq.py +++ b/pycbc/dq.py @@ -101,7 +101,7 @@ def parse_veto_definer(veto_def_filename, ifos): return data -GWOSC_URL = 'https://www.gw-openscience.org/timeline/segments/json/{}/{}_{}/{}/{}/' +GWOSC_URL = 'https://www.gwosc.org/timeline/segments/json/{}/{}_{}/{}/{}/' def query_dqsegdb2(detector, flag_name, start_time, end_time, server): diff --git a/pycbc/events/coinc.py b/pycbc/events/coinc.py index 5ca119ebc20..23eb7da4a5d 100644 --- a/pycbc/events/coinc.py +++ b/pycbc/events/coinc.py @@ -369,8 +369,8 @@ def cluster_coincs(stat, time1, time2, timeslide_id, slide, window, **kwargs): else: time = 0.5 * (time2 + time1) - tslide = timeslide_id.astype(numpy.float128) - time = time.astype(numpy.float128) + tslide = timeslide_id.astype(numpy.longdouble) + time = time.astype(numpy.longdouble) span = (time.max() - time.min()) + window * 10 time = time + span * tslide @@ -424,8 +424,8 @@ def cluster_coincs_multiifo(stat, time_coincs, timeslide_id, slide, window, nifos_minusone = (num_ifos - numpy.ones_like(num_ifos)) time_avg = time_avg + (nifos_minusone * timeslide_id * slide)/num_ifos - tslide = timeslide_id.astype(numpy.float128) - time_avg = time_avg.astype(numpy.float128) + tslide = timeslide_id.astype(numpy.longdouble) + time_avg = time_avg.astype(numpy.longdouble) span = (time_avg.max() - time_avg.min()) + window * 10 time_avg = time_avg + span * tslide diff --git a/pycbc/events/single.py b/pycbc/events/single.py index 266e853de48..f41b2dfb131 100644 --- a/pycbc/events/single.py +++ b/pycbc/events/single.py @@ -57,29 +57,88 @@ def insert_args(parser): 'coefficients and counts for specific ' 'single trigger IFAR fitting.') parser.add_argument('--sngl-ifar-est-dist', nargs='+', - default='conservative', - choices=['conservative', 'mean', 'fixed'], action=MultiDetOptionAction, help='Which trigger distribution to use when ' 'calculating IFAR of single triggers. ' - 'Default conservative. Can be given as ' - 'a single value or as detector-value pairs, ' - 'e.g. H1:mean L1:mean V1:conservative') + 'Can be given as a single value or as ' + 'detector-value pairs, e.g. 
H1:mean ' + 'L1:mean V1:conservative') + + @staticmethod + def verify_args(args, parser, ifos): + sngl_opts = [args.single_reduced_chisq_threshold, + args.single_duration_threshold, + args.single_newsnr_threshold, + args.sngl_ifar_est_dist] + sngl_opts_str = ("--single-reduced-chisq-threshold, " + "--single-duration-threshold, " + "--single-newsnr-threshold, " + "--sngl-ifar-est-dist") + + if any(sngl_opts) and not all(sngl_opts): + parser.error(f"Single detector trigger options ({sngl_opts_str}) " + "must either all be given or none.") + + if args.enable_single_detector_upload \ + and not args.enable_gracedb_upload: + parser.error("--enable-single-detector-upload requires " + "--enable-gracedb-upload to be set.") + + sngl_optional_opts = [args.single_fixed_ifar, + args.single_fit_file] + sngl_optional_opts_str = ("--single-fixed-ifar, " + "--single-fit-file") + if any(sngl_optional_opts) and not all(sngl_opts): + parser.error("Optional singles options " + f"({sngl_optional_opts_str}) given but no " + f"required options ({sngl_opts_str}) are.") + + for ifo in ifos: + # Check which option(s) are needed for each IFO and if they exist: + + # Notes for the logic here: + # args.sngl_ifar_est_dist.default_set is True if single value has + # been set to be the same for all values + # bool(args.sngl_ifar_est_dist) is True if option is given + if args.sngl_ifar_est_dist and \ + not args.sngl_ifar_est_dist.default_set \ + and not args.sngl_ifar_est_dist[ifo]: + # Option has been given, different for each IFO, + # and this one is not present + parser.error("All IFOs required in --sngl-ifar-est-dist " + "if IFO-specific options are given.") + + if args.sngl_ifar_est_dist[ifo] is None: + # Default - no singles being used + continue + + if not args.sngl_ifar_est_dist[ifo] == 'fixed': + if not args.single_fit_file: + # Fixed IFAR option doesn't need the fits file + parser.error(f"Single detector trigger fits file must be " + "given if --sngl-ifar-est-dist is not " + f"fixed for all ifos (at least {ifo} has " + f"option {args.sngl_ifar_est_dist[ifo]}).") + if ifo in args.single_fixed_ifar: + parser.error(f"Value {args.single_fixed_ifar[ifo]} given " + f"for {ifo} in --single-fixed-ifar, but " + f"--sngl-ifar-est-dist for {ifo} " + f"is {args.sngl_ifar_est_dist[ifo]}, not " + "fixed.") + else: + # Check that the fixed IFAR value has actually been + # given if using this instead of a distribution + if not args.single_fixed_ifar[ifo]: + parser.error(f"--single-fixed-ifar must be " + "given if --sngl-ifar-est-dist is fixed. 
" + f"This is true for at least {ifo}.") + + # Return value is a boolean whether we are analysing singles or not + # The checks already performed mean that all(sngl_opts) is okay + return all(sngl_opts) @classmethod def from_cli(cls, args, ifo): - sngl_opts_required = all([args.single_fit_file, - args.single_reduced_chisq_threshold, - args.single_duration_threshold, - args.single_newsnr_threshold]) - if args.enable_single_detector_background and not sngl_opts_required: - raise RuntimeError("Single detector trigger options " - "(--single-fit-file, " - "--single-reduced-chisq-threshold, " - "--single-duration-threshold, " - "--single-newsnr-threshold) " - "must ALL be given if single detector " - "background is enabled") return cls( ifo, newsnr_threshold=args.single_newsnr_threshold[ifo], reduced_chisq_threshold=args.single_reduced_chisq_threshold[ifo], @@ -139,7 +198,8 @@ def check(self, trigs, data_reader): return candidate def calculate_ifar(self, sngl_ranking, duration): - if self.fixed_ifar: + logging.info("Calculating IFAR") + if self.fixed_ifar and self.ifo in self.fixed_ifar: return self.fixed_ifar[self.ifo] try: diff --git a/pycbc/frame/losc.py b/pycbc/frame/losc.py index b2b5c229b96..f5aff99e211 100644 --- a/pycbc/frame/losc.py +++ b/pycbc/frame/losc.py @@ -14,11 +14,11 @@ # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ -This modules contains functions for getting data from the LOSC +This modules contains functions for getting data from the GWOSC """ from pycbc.io import get_file -_losc_url = "https://www.gw-openscience.org/archive/links/%s/%s/%s/%s/json/" +_gwosc_url = "https://www.gwosc.org/archive/links/%s/%s/%s/%s/json/" def get_run(time, ifo=None): """ Return the run name for a given time @@ -90,7 +90,7 @@ def losc_frame_json(ifo, start_time, end_time): 'You have requested data that uses ' 'both %s and %s' % (run, run2)) - url = _losc_url % (run, ifo, int(start_time), int(end_time)) + url = _gwosc_url % (run, ifo, int(start_time), int(end_time)) try: return json.loads(urlopen(url).read().decode()) @@ -100,7 +100,7 @@ def losc_frame_json(ifo, start_time, end_time): 'ifo=%s, run=%s, between %s-%s' % (ifo, run, start_time, end_time)) def losc_frame_urls(ifo, start_time, end_time): - """ Get a list of urls to losc frame files + """ Get a list of urls to GWOSC frame files Parameters ---------- @@ -121,7 +121,7 @@ def losc_frame_urls(ifo, start_time, end_time): return [d['url'] for d in data if d['format'] == 'gwf'] def read_frame_losc(channels, start_time, end_time): - """ Read channels from losc data + """ Read channels from GWOSC data Parameters ---------- @@ -162,7 +162,7 @@ def read_frame_losc(channels, start_time, end_time): return ts def read_strain_losc(ifo, start_time, end_time): - """ Get the strain data from the LOSC data + """ Get the strain data from the GWOSC data Parameters ---------- diff --git a/pycbc/io/live.py b/pycbc/io/live.py index 5e546cfc2a7..754fcbcf563 100644 --- a/pycbc/io/live.py +++ b/pycbc/io/live.py @@ -92,7 +92,6 @@ def __init__(self, coinc_ifos, ifos, coinc_results, **kwargs): snr_ifos = sld.keys() # Ifos with SNR time series calculated self.snr_series = {ifo: sld[ifo]['snr_series'] for ifo in snr_ifos} # Extra ifos have SNR time series but not sngl inspiral triggers - extra_ifos = list(set(snr_ifos) - set(self.et_ifos)) for ifo in snr_ifos: # Ifos used for sky loc must have a PSD @@ -101,14 +100,11 @@ def __init__(self, coinc_ifos, ifos, coinc_results, 
**kwargs): else: self.snr_series = None snr_ifos = self.et_ifos - extra_ifos = [] # Set up the bare structure of the xml document outdoc = ligolw.Document() outdoc.appendChild(ligolw.LIGO_LW()) - # FIXME is it safe (in terms of downstream operations) to let - # `program_name` default to the actual script name? proc_id = create_process_table(outdoc, program_name='pycbc', detectors=snr_ifos).process_id @@ -191,16 +187,6 @@ def __init__(self, coinc_ifos, ifos, coinc_results, **kwargs): self.et_ifos]) \ + self.time_offset - # For extra detectors used only for sky loc, respect BAYESTAR's - # assumptions and checks - bayestar_check_fields = ('mass1 mass2 mtotal mchirp eta spin1x ' - 'spin1y spin1z spin2x spin2y spin2z').split() - for sngl in sngl_inspiral_table: - if sngl.ifo in extra_ifos: - for bcf in bayestar_check_fields: - setattr(sngl, bcf, getattr(sngl_populated, bcf)) - sngl.end = lal.LIGOTimeGPS(self.merger_time) - outdoc.childNodes[0].appendChild(coinc_event_map_table) outdoc.childNodes[0].appendChild(sngl_inspiral_table) diff --git a/pycbc/strain/strain.py b/pycbc/strain/strain.py index 0eea0e7c9e9..2793012d565 100644 --- a/pycbc/strain/strain.py +++ b/pycbc/strain/strain.py @@ -1433,6 +1433,9 @@ def __init__(self, frame_src, channel_name, start_time, autogating_pad=None, autogating_width=None, autogating_taper=None, + autogating_duration=None, + autogating_psd_segment_length=None, + autogating_psd_stride=None, state_channel=None, data_quality_channel=None, idq_channel=None, @@ -1490,6 +1493,12 @@ def __init__(self, frame_src, channel_name, start_time, Half-duration of the zeroed-out portion of autogates. autogating_taper: float, Optional Duration of taper on either side of the gating window in seconds. + autogating_duration: float, Optional + Amount of data in seconds to apply autogating on. + autogating_psd_segment_length: float, Optional + The length in seconds of each segment used to estimate the PSD with Welch's method. + autogating_psd_stride: float, Optional + The overlap in seconds between each segment used to estimate the PSD with Welch's method. state_channel: {str, None}, Optional Channel to use for state information about the strain data_quality_channel: {str, None}, Optional @@ -1598,6 +1607,9 @@ def __init__(self, frame_src, channel_name, start_time, self.autogating_pad = autogating_pad self.autogating_width = autogating_width self.autogating_taper = autogating_taper + self.autogating_duration = autogating_duration + self.autogating_psd_segment_length = autogating_psd_segment_length + self.autogating_psd_stride = autogating_psd_stride self.gate_params = [] self.sample_rate = sample_rate @@ -1699,6 +1711,27 @@ def recalculate_psd(self): logging.info("Recalculating %s PSD, %s", self.detector, psd.dist) return True + def check_psd_dist(self, min_dist, max_dist): + """Check that the horizon distance of a detector is within a required + range. If so, return True, otherwise log a warning and return False. + """ + if self.psd is None: + # ignore check + return True + # Note that the distance can in principle be inf or nan, e.g. if h(t) + # is identically zero. The check must fail in those cases. Be careful + # with how the logic works out when comparing inf's or nan's! 
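+        # Any comparison involving nan evaluates to False, so a nan horizon
+        # distance fails the check below, and an infinite distance fails the
+        # upper bound, as required.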
+ good = self.psd.dist >= min_dist and self.psd.dist <= max_dist + if not good: + logging.info( + "%s PSD dist %s outside acceptable range [%s, %s]", + self.detector, + self.psd.dist, + min_dist, + max_dist + ) + return good + def overwhitened_data(self, delta_f): """ Return overwhitened data @@ -1894,9 +1927,11 @@ def advance(self, blocksize, timeout=10): # apply gating if needed if self.autogating_threshold is not None: + autogating_duration_length = self.autogating_duration * self.sample_rate + autogating_start_sample = int(len(self.strain) - autogating_duration_length) glitch_times = detect_loud_glitches( - strain[:-self.corruption], - psd_duration=2., psd_stride=1., + self.strain[autogating_start_sample:-self.corruption], + psd_duration=self.autogating_psd_segment_length, psd_stride=self.autogating_psd_stride, threshold=self.autogating_threshold, cluster_window=self.autogating_cluster, low_freq_cutoff=self.highpass_frequency, @@ -1968,6 +2003,9 @@ def from_cli(cls, ifo, args, maxlen): autogating_pad=args.autogating_pad, autogating_width=args.autogating_width, autogating_taper=args.autogating_taper, + autogating_duration=args.autogating_duration, + autogating_psd_segment_length=args.autogating_psd_segment_length, + autogating_psd_stride=args.autogating_psd_stride, psd_abort_difference=args.psd_abort_difference, psd_recalculate_difference=args.psd_recalculate_difference, force_update_cache=args.force_update_cache, diff --git a/requirements.txt b/requirements.txt index 7940c83cff4..91598875809 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,7 @@ tqdm gwdatafind # Requirements for full pegasus env -pegasus-wms.api >= 5.0.3 +pegasus-wms.api == 5.0.3 # need to pin until pegasus for further upstream # addresses incompatibility between old flask/jinja2 and latest markupsafe markupsafe <= 2.0.1 diff --git a/setup.py b/setup.py index 0e18895211c..0d68fb625b7 100755 --- a/setup.py +++ b/setup.py @@ -122,7 +122,7 @@ def __getattr__(self, attr): vinfo = _version_helper.generate_git_version_info() except: vinfo = vdummy() - vinfo.version = '2.1.2' + vinfo.version = '2.1.3' vinfo.release = 'True' with open('pycbc/version.py', 'wb') as f: diff --git a/test/test_conversions.py b/test/test_conversions.py index 1b721098064..0e32f14189b 100644 --- a/test/test_conversions.py +++ b/test/test_conversions.py @@ -126,8 +126,10 @@ def test_round_robin(self): (conversions.eta_from_q, (self.q,), 'eta'), (conversions.mass1_from_mchirp_q, (self.mchirp, self.q), 'mp'), (conversions.mass2_from_mchirp_q, (self.mchirp, self.q), 'ms'), + (conversions.tau0_from_mchirp, (self.mchirp, self.f_lower), 'tau0'), (conversions.tau0_from_mass1_mass2, (self.m1, self.m2, self.f_lower), 'tau0'), (conversions.tau3_from_mass1_mass2, (self.m1, self.m2, self.f_lower), 'tau3'), + (conversions.mchirp_from_tau0, (self.tau0, self.f_lower), 'mchirp'), (conversions.mtotal_from_tau0_tau3, (self.tau0, self.tau3, self.f_lower), 'mtotal'), (conversions.eta_from_tau0_tau3, (self.tau0, self.tau3, self.f_lower), 'eta'), (conversions.mass1_from_tau0_tau3, (self.tau0, self.tau3, self.f_lower), 'mp'), diff --git a/tools/cvmfs-default.local b/tools/cvmfs-default.local index 6b11c123d76..04bc26c1e55 100644 --- a/tools/cvmfs-default.local +++ b/tools/cvmfs-default.local @@ -1,5 +1,5 @@ CVMFS_USER=cvmfs -CVMFS_REPOSITORIES=config-osg.opensciencegrid.org,oasis.opensciencegrid.org,gwosc.osgstorage.org +CVMFS_REPOSITORIES=config-osg.opensciencegrid.org,software.igwn.org,gwosc.osgstorage.org CVMFS_QUOTA_LIMIT=5000 CVMFS_HTTP_PROXY=DIRECT 
CVMFS_MAX_RETRIES=10 diff --git a/tools/docker_build_dist.sh b/tools/docker_build_dist.sh index 3a02d056f22..db20424bb36 100755 --- a/tools/docker_build_dist.sh +++ b/tools/docker_build_dist.sh @@ -112,9 +112,11 @@ EOF echo -e "\\n>> [`date`] Deploying release ${SOURCE_TAG} to CVMFS" # remove lalsuite source and deploy on cvmfs rm -rf ${VENV_PATH}/src/lalsuite - ssh ouser.ligo@oasis-login.opensciencegrid.org "mkdir -p /home/login/ouser.ligo/ligo/deploy/sw/pycbc/${ENV_OS}/virtualenv/pycbc-${SOURCE_TAG}" - rsync --rsh=ssh $RSYNC_OPTIONS -qraz ${VENV_PATH}/ ouser.ligo@oasis-login.opensciencegrid.org:/home/login/ouser.ligo/ligo/deploy/sw/pycbc/${ENV_OS}/virtualenv/pycbc-${SOURCE_TAG}/ - ssh ouser.ligo@oasis-login.opensciencegrid.org osg-oasis-update + export RSYNC_OPTIONS VENV_PATH ENV_OS SOURCE_TAG + if ! bash /pycbc/tools/venv_transfer_commands.sh; then + ssh cvmfs.pycbc@cvmfs-software.ligo.caltech.edu "sudo -u repo.software cvmfs_server abort -f" + exit 1 + fi fi echo -e "\\n>> [`date`] virtualenv deployment complete" fi diff --git a/tools/docker_build_prepssh.sh b/tools/docker_build_prepssh.sh index 864f385e7b6..9c39bc63d9f 100644 --- a/tools/docker_build_prepssh.sh +++ b/tools/docker_build_prepssh.sh @@ -6,4 +6,5 @@ echo -e "Host sugwg-test1.phy.syr.edu\n\tStrictHostKeyChecking no\n" >> ~/.ssh/c echo -e "Host sugwg-condor.phy.syr.edu\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config ; echo -e "Host oasis-login.opensciencegrid.org\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config ; echo -e "Host code.pycbc.phy.syr.edu\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config ; +echo -e "@cert-authority * ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHa03AZF3CvJ1C4Po15swSaMYI4kPszyBH/uOKHQYvu+EpehSfMZMaX5D7pUpc5cAXvMEEFzlZJQH4pOioIlqyE= IGWN_CIT_SSH_CERT_AUTHORITY" >> ~/.ssh/known_hosts chmod 600 ~/.ssh/id_rsa ~/.ssh/config ~/.ssh/ldg_user ~/.ssh/ldg_token diff --git a/tools/venv_transfer_commands.sh b/tools/venv_transfer_commands.sh new file mode 100644 index 00000000000..0d793bb1907 --- /dev/null +++ b/tools/venv_transfer_commands.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e + +# Please don't try and run this script directly! +ssh cvmfs.pycbc@cvmfs-software.ligo.caltech.edu <=12.2.0 + gxx_linux-64>=12.2.0 + binutils_linux-64>=2.39 gsl lapack==3.6.1 conda_channels=conda-forge
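As an end-to-end illustration of the new chirp-time conversions added to ``pycbc/conversions.py`` above, here is a minimal sketch (illustrative values only; it assumes a PyCBC build that includes this change) which round-trips a chirp mass through ``tau0_from_mchirp`` and ``mchirp_from_tau0``:

.. code-block:: python

    from pycbc import conversions

    f_lower = 20.0   # Hz, starting frequency used for the chirp time
    mchirp = 1.1977  # solar masses, roughly a BNS chirp mass

    tau0 = conversions.tau0_from_mchirp(mchirp, f_lower)
    print('tau0 = %.1f s' % tau0)

    # Inverting should recover the input chirp mass to numerical precision
    print('mchirp = %.4f Msun' % conversions.mchirp_from_tau0(tau0, f_lower))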