diff --git a/bin/all_sky_search/pycbc_add_statmap b/bin/all_sky_search/pycbc_add_statmap
index 7cf5da20d97..7135c8ffabf 100755
--- a/bin/all_sky_search/pycbc_add_statmap
+++ b/bin/all_sky_search/pycbc_add_statmap
@@ -361,8 +361,14 @@ fg_fars_out = np.sum(isincombo_mask * fg_fars, axis=0)
 fg_fars_exc_out = np.sum(isincombo_mask * fg_fars_exc, axis=0)
 
 # Apply any limits as appropriate
-fg_fars_out = significance.apply_far_limit(fg_fars_out, significance_dict, combo=fg_coinc_type)
-fg_fars_exc_out = significance.apply_far_limit(fg_fars_exc_out, significance_dict, combo=fg_coinc_type)
+fg_fars_out = significance.apply_far_limit(
+    fg_fars_out,
+    significance_dict,
+    combo=fg_coinc_type)
+fg_fars_exc_out = significance.apply_far_limit(
+    fg_fars_exc_out,
+    significance_dict,
+    combo=fg_coinc_type)
 
 fg_ifar = conv.sec_to_year(1. / fg_fars_out)
 fg_ifar_exc = conv.sec_to_year(1. / fg_fars_exc_out)
@@ -562,6 +568,7 @@ while True:
         final_combined_fg = final_combined_fg + \
             combined_fg_data.select(where_combined)
         combined_fg_data = combined_fg_data.remove(where_combined)
+        fg_coinc_type = np.delete(fg_coinc_type, where_combined)
         n_triggers -= 1
 
     logging.info('Removing background triggers at time {} within window '
@@ -605,6 +612,17 @@ while True:
             sep_bg_data[key].data['decimation_factor'],
             bg_t_y,
             **significance_dict[ifo_combo_key])
+        fg_far = significance.apply_far_limit(
+            fg_far,
+            significance_dict,
+            combo=key,
+        )
+        bg_far = significance.apply_far_limit(
+            bg_far,
+            significance_dict,
+            combo=key,
+        )
+
         sep_bg_data[key].data['ifar'] = 1. / bg_far
         sep_fg_data[key].data['ifar'] = 1. / fg_far
         sep_fg_data[key].data['fap'] = 1 - \
@@ -631,9 +649,15 @@ while True:
     isincombo_mask = np.array([list(is_in_combo_time[ct])
                                for ct in all_ifo_combos])
     fg_fars = np.array([list(far[ct]) for ct in all_ifo_combos])
+    fg_fars_out = np.sum(isincombo_mask * fg_fars, axis=0)
+    fg_fars_out = significance.apply_far_limit(
+        fg_fars_out,
+        significance_dict,
+        combo=fg_coinc_type,
+    )
 
     # Combine the FARs with the mask to obtain the new ifars
     combined_fg_data.data['ifar'] = conv.sec_to_year(
-        1. / np.sum(isincombo_mask * fg_fars, axis=0))
+        1. / fg_fars_out)
     fg_time -= args.cluster_window
     combined_fg_data.data['fap'] = 1 - \
         np.exp(-conv.sec_to_year(fg_time) / combined_fg_data.data['ifar'])
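# For intuition, the repeated apply_far_limit calls in this patch clip each
# false-alarm rate from below before it is inverted to an IFAR; a minimal
# standalone sketch of that clamp (hypothetical apply_far_limit_sketch helper
# and limits dict, not PyCBC's actual API):
import numpy as np

SEC_PER_YEAR = 365.25 * 24 * 3600

def apply_far_limit_sketch(far, limits, combo):
    # Clip FARs (in Hz) from below so IFAR = 1/FAR stays finite and bounded
    far = np.asarray(far, dtype=float)
    if combo in limits:                        # e.g. {'H1L1': 1e-8}
        far = np.maximum(far, limits[combo])
    return far

far = apply_far_limit_sketch([0.0, 3e-9, 1e-7], {'H1L1': 1e-8}, 'H1L1')
ifar_years = 1.0 / far / SEC_PER_YEAR          # mirrors conv.sec_to_year(1. / far)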
diff --git a/bin/all_sky_search/pycbc_calculate_dqflag b/bin/all_sky_search/pycbc_calculate_dqflag
index b639b539929..981d190ad33 100644
--- a/bin/all_sky_search/pycbc_calculate_dqflag
+++ b/bin/all_sky_search/pycbc_calculate_dqflag
@@ -26,6 +26,8 @@ parser.add_argument('--flag', type=str, required=True,
                     help="name of the data quality flag")
 parser.add_argument('--ifo', type=str, required=True,
                     help="interferometer to analyze")
+parser.add_argument('--sample-frequency', required=True, type=int,
+                    help="timeseries sampling frequency in Hz")
 parser.add_argument("--output-file", required=True,
                     help="name of output file")
 parser.add_argument("--output-channel", required=False,
@@ -35,13 +37,14 @@ args = parser.parse_args()
 
 pycbc.init_logging(args.verbose)
 
-def make_dq_ts(dq_segs, start, end, ifo, flag):
+def make_dq_ts(dq_segs, start, end, ifo, flag, freq):
     """ Create a data quality timeseries
     """
     logging.info('Creating data quality timeseries for flag %s', flag)
     # Keep just times which belong to science_segments
-    dq_times = numpy.arange(start, end)
+    dur = end - start
+    dq_times = numpy.linspace(start, end, int(freq*dur), endpoint=False)
     dq = numpy.zeros(len(dq_times))
     # Identify times within segments for the chosen flag
     indexes, _ = veto.indices_within_segments(dq_times, [dq_segs], ifo=ifo,
@@ -51,7 +54,7 @@ def make_dq_ts(dq_segs, start, end, ifo, flag):
     else:
         logging.warning('Veto definer segment list is empty for flag %s-%s',
                         ifo, flag)
-    return TimeSeries(dq, epoch=start, delta_t=1.)
+    return TimeSeries(dq, epoch=start, delta_t=1.0/freq)
 
 ifo = args.ifo
 
@@ -63,7 +66,7 @@ if len(flag_list)>1:
 
 # Create data quality time series
 dq = make_dq_ts(args.dq_segments, args.gps_start_time, args.gps_end_time,
-                ifo, flag)
+                ifo, flag, args.sample_frequency)
 
 dq.save(args.output_file, group=args.output_channel)
diff --git a/bin/all_sky_search/pycbc_coinc_findtrigs b/bin/all_sky_search/pycbc_coinc_findtrigs
index 234da72f53f..bfc58e92acb 100644
--- a/bin/all_sky_search/pycbc_coinc_findtrigs
+++ b/bin/all_sky_search/pycbc_coinc_findtrigs
@@ -181,7 +181,7 @@ if args.randomize_template_order:
     shuffle(template_ids)
     template_ids = template_ids[tmin:tmax]
 else:
-    template_ids = np.array([range(tmin, tmax)])
+    template_ids = numpy.array([range(tmin, tmax)])
 
 original_bank_len = len(template_ids)
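# The resampled make_dq_ts above emits one sample per 1/freq seconds, set to 1
# inside flagged segments; a plain-numpy sketch of the same construction
# (hypothetical segment list, no PyCBC types):
import numpy as np

def dq_flag_series(start, end, freq, segments):
    # One sample every 1/freq s; 1.0 while the flag is active, else 0.0
    n = int(freq * (end - start))
    times = np.linspace(start, end, n, endpoint=False)
    dq = np.zeros(n)
    for seg_start, seg_end in segments:
        dq[(times >= seg_start) & (times < seg_end)] = 1.0
    return times, dq

times, dq = dq_flag_series(100.0, 110.0, 16, [(102.0, 104.5)])
assert dq.sum() == 2.5 * 16                    # 2.5 s of active flag at 16 Hz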
diff --git a/bin/all_sky_search/pycbc_coinc_statmap b/bin/all_sky_search/pycbc_coinc_statmap
index 8fcd7836fd9..bfe97b866ac 100755
--- a/bin/all_sky_search/pycbc_coinc_statmap
+++ b/bin/all_sky_search/pycbc_coinc_statmap
@@ -257,10 +257,22 @@ bg_far_exc, fg_far_exc = significance.get_far(
     background_time_exc,
     **significance_dict[ifo_combo])
 
-fg_far = significance.apply_far_limit(fg_far, significance_dict, combo=ifo_combo)
-bg_far = significance.apply_far_limit(bg_far, significance_dict, combo=ifo_combo)
-fg_far_exc = significance.apply_far_limit(fg_far_exc, significance_dict, combo=ifo_combo)
-bg_far_exc = significance.apply_far_limit(bg_far_exc, significance_dict, combo=ifo_combo)
+fg_far = significance.apply_far_limit(
+    fg_far,
+    significance_dict,
+    combo=ifo_combo)
+bg_far = significance.apply_far_limit(
+    bg_far,
+    significance_dict,
+    combo=ifo_combo)
+fg_far_exc = significance.apply_far_limit(
+    fg_far_exc,
+    significance_dict,
+    combo=ifo_combo)
+bg_far_exc = significance.apply_far_limit(
+    bg_far_exc,
+    significance_dict,
+    combo=ifo_combo)
 
 f['background/ifar'] = conv.sec_to_year(1. / bg_far)
 f['background_exc/ifar'] = conv.sec_to_year(1. / bg_far_exc)
@@ -421,6 +433,17 @@ while numpy.any(ifar_foreground >= background_time):
         return_counts=True,
         **significance_dict[ifo_combo])
 
+    fg_far = significance.apply_far_limit(
+        fg_far,
+        significance_dict,
+        combo=ifo_combo
+    )
+    bg_far = significance.apply_far_limit(
+        bg_far,
+        significance_dict,
+        combo=ifo_combo,
+    )
+
     # Update the ifar_foreground criteria depending on whether foreground
     # triggers are being removed via inclusive or exclusive background.
     if args.hierarchical_removal_against == 'inclusive':
@@ -435,6 +458,11 @@ while numpy.any(ifar_foreground >= background_time):
             exc_zero_trigs.decimation_factor,
             background_time_exc,
             **significance_dict[ifo_combo])
+        fg_far_exc = significance.apply_far_limit(
+            fg_far_exc,
+            significance_dict,
+            combo=ifo_combo
+        )
         ifar_foreground = 1. / fg_far_exc
 
 # ifar_foreground has been updated and the code can continue.
diff --git a/bin/all_sky_search/pycbc_coinc_statmap_inj b/bin/all_sky_search/pycbc_coinc_statmap_inj
index 2dab3a71581..03e4eab26c6 100644
--- a/bin/all_sky_search/pycbc_coinc_statmap_inj
+++ b/bin/all_sky_search/pycbc_coinc_statmap_inj
@@ -37,12 +37,21 @@ if args.verbose:
     log_level = logging.INFO
 logging.basicConfig(format='%(asctime)s : %(message)s', level=log_level)
 
-ifo_key = ''.join(args.ifos)
-significance_dict = significance.digest_significance_options([ifo_key], args)
 window = args.cluster_window
 
 logging.info("Loading coinc zerolag triggers")
 zdata = pycbc.io.MultiifoStatmapData(files=args.zero_lag_coincs, ifos=args.ifos)
+
+if 'ifos' in zdata.attrs:
+    ifos = zdata.attrs['ifos'].split(' ')
+    logging.info('using ifos from file {}'.format(args.zero_lag_coincs[0]))
+else:
+    ifos = args.ifos
+    logging.info('using ifos from command line input')
+
+ifo_key = ''.join(ifos)
+significance_dict = significance.digest_significance_options([ifo_key], args)
+
 zdata = zdata.cluster(window)
 
 f = h5py.File(args.output_file, "w")
@@ -51,7 +60,7 @@ f.attrs['num_of_ifos'] = zdata.attrs['num_of_ifos']
 f.attrs['pivot'] = zdata.attrs['pivot']
 f.attrs['fixed'] = zdata.attrs['fixed']
 f.attrs['timeslide_interval'] = zdata.attrs['timeslide_interval']
-f.attrs['ifos'] = ' '.join(sorted(args.ifos))
+f.attrs['ifos'] = ' '.join(sorted(ifos))
 
 # Copy over the segment for coincs and singles
 for key in zdata.seg.keys():
@@ -90,7 +99,11 @@ if len(zdata) > 0:
         background_time,
         **significance_dict[ifo_key])
 
-    fg_far_exc = significance.apply_far_limit(fg_far_exc, significance_dict, combo=ifo_key)
+    fg_far_exc = significance.apply_far_limit(
+        fg_far_exc,
+        significance_dict,
+        combo=ifo_key,
+    )
 
     ifar_exc = 1. / fg_far_exc
     fap_exc = 1 - numpy.exp(- coinc_time / ifar_exc)
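# The statmap_inj hunk above prefers the ifo list recorded in the input file's
# attributes and only falls back to the command line; a minimal h5py sketch of
# that pattern (hypothetical file name and detector list):
import h5py

with h5py.File('zerolag_coincs.hdf', 'r') as f:
    if 'ifos' in f.attrs:
        ifos = f.attrs['ifos'].split(' ')      # space-separated attribute
    else:
        ifos = ['H1', 'L1']                    # fall back to CLI input
ifo_key = ''.join(ifos)                        # e.g. 'H1L1'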
diff --git a/bin/all_sky_search/pycbc_fit_sngls_split_binned b/bin/all_sky_search/pycbc_fit_sngls_split_binned
index 7326715c700..961ec6f5892 100644
--- a/bin/all_sky_search/pycbc_fit_sngls_split_binned
+++ b/bin/all_sky_search/pycbc_fit_sngls_split_binned
@@ -22,6 +22,7 @@ from matplotlib import pyplot as plt
 import numpy as np
 
 from pycbc import events, bin_utils, results
+from pycbc.io import HFile, SingleDetTriggers
 from pycbc.events import triggers as trigs
 from pycbc.events import trigger_fits as trstats
 from pycbc.events import stat as pystat
@@ -82,6 +83,10 @@ parser.add_argument("--gating-veto-windows", nargs='+',
 parser.add_argument("--stat-fit-threshold", type=float, required=True,
                     help="Only fit triggers with statistic value above this "
                          "threshold. Required")
+parser.add_argument("--plot-lower-stat-limit", type=float, required=True,
+                    help="Plot triggers down to this value. Setting this too "
+                         "low will incur huge memory usage in a full search. "
+                         "To avoid this, choose 5.5 or larger.")
 parser.add_argument("--fit-function",
                     choices=["exponential", "rayleigh", "power"],
                     help="Functional form for the maximum likelihood fit")
@@ -96,6 +101,8 @@ pystat.insert_statistic_option_group(parser,
     default_ranking_statistic='single_ranking_only')
 args = parser.parse_args()
 
+assert args.stat_fit_threshold >= args.plot_lower_stat_limit
+
 pycbc.init_logging(args.verbose)
 
 logging.info('Opening trigger file: %s' % args.trigger_file)
@@ -122,8 +129,12 @@ for ex_p in extparams:
         logging.info('Reading duration from trigger file')
         # List comprehension loops over templates; if a template has no triggers, accessing
         # the 0th entry of its region reference will return zero due to a quirk of h5py.
-        params[ex_p] = np.array([trigf[args.ifo + '/template_duration'][ref][0]
-                        for ref in trigf[args.ifo + '/template_duration_template'][:]])
+        params[ex_p] = np.array(
+            [
+                trigf[args.ifo + '/template_duration'][ref][0]
+                for ref in trigf[args.ifo + '/template_duration_template'][:]
+            ]
+        )
     else:
         logging.info("Calculating " + ex_p + " from template parameters")
         params[ex_p] = trigs.get_param(ex_p, args, params['mass1'],
@@ -198,10 +209,65 @@ for i, lower_2, upper_2 in zip(range(args.split_two_nbins),
 
 logging.info('Getting template boundaries from trigger file')
 boundaries = trigf[args.ifo + '/template_boundaries'][:]
+
+trigf.close()
+
+# Setup a data mask to remove any triggers with SNR below threshold
+# This works as a pre-filter as SNR is always greater than or equal
+# to sngl_ranking, except in the psdvar case, where it could increase.
+with HFile(args.trigger_file, 'r') as trig_file:
+    n_triggers_orig = trig_file[f'{args.ifo}/snr'].size
+    logging.info("Trigger file has %d triggers", n_triggers_orig)
+    logging.info('Generating trigger mask')
+    if f'{args.ifo}/psd_var_val' in trig_file:
+        idx, _, _ = trig_file.select(
+            lambda snr, psdvar: snr / psdvar ** 0.5 >= args.plot_lower_stat_limit,
+            f'{args.ifo}/snr',
+            f'{args.ifo}/psd_var_val',
+            return_indices=True
+        )
+    else:
+        # psd_var_val may not have been calculated
+        idx, _ = trig_file.select(
+            lambda snr: snr >= args.plot_lower_stat_limit,
+            f'{args.ifo}/snr',
+            return_indices=True
+        )
+    data_mask = np.zeros(n_triggers_orig, dtype=bool)
+    data_mask[idx] = True
+
+logging.info('Calculating single stat values from trigger file')
+trigs = SingleDetTriggers(
+    args.trigger_file,
+    None,
+    None,
+    None,
+    None,
+    args.ifo,
+    premask=data_mask
+)
+# This is the direct pointer to the HDF file, used later on
+trigf = trigs.trigs_f
+
+stat = trigs.get_ranking(args.sngl_ranking)
+time = trigs.end_time
+
+logging.info('Processing template boundaries')
 max_boundary_id = np.argmax(boundaries)
 sorted_boundary_list = np.sort(boundaries)
 
-logging.info('Processing template boundaries')
+# In the two blocks of code that follow we are trying to figure out the index
+# ranges in the masked trigger lists corresponding to the "boundaries".
+# We will do this by walking over the boundaries in the order they're
+# stored in the file, adding in the number of triggers not removed by the
+# mask every time.
+
+# First step is to loop over the "boundaries" which gives the start position
+# of each block of triggers (corresponding to one template) in the full
+# trigger merge file.
+# Here we identify the end_idx for the triggers corresponding to each template.
 where_idx_end = np.zeros_like(boundaries)
 for idx, idx_start in enumerate(boundaries):
     if idx == max_boundary_id:
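# The pre-mask above cheaply discards sub-threshold triggers before the
# expensive ranking calculation, using snr/sqrt(psd_var) when available; the
# same filter in plain numpy (illustrative arrays only):
import numpy as np

snr = np.array([5.1, 6.0, 7.3, 5.4])
psd_var_val = np.array([1.2, 1.0, 0.9, 1.1])   # per-trigger PSD variation
plot_lower_stat_limit = 5.5

keep = snr / psd_var_val ** 0.5 >= plot_lower_stat_limit
data_mask = np.zeros(snr.size, dtype=bool)
data_mask[np.flatnonzero(keep)] = True         # same mask, built via indices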
@@ -210,22 +276,41 @@ for idx, idx_start in enumerate(boundaries):
         where_idx_end[idx] = sorted_boundary_list[
             np.argmax(sorted_boundary_list == idx_start) + 1]
 
-logging.info('Calculating single stat values from trigger file')
-rank_method = pystat.get_statistic_from_opts(args, [args.ifo])
-stat = rank_method.get_sngl_ranking(trigf[args.ifo])
+# Next we need to map these start/stop indices in the full file, to the start
+# stop indices in the masked list of triggers. We do this by figuring out
+# how many triggers are in the masked list for each template in the order they
+# are stored in the trigger merge file, and keep a running sum.
+curr_count = 0
+mask_start_idx = np.zeros_like(boundaries)
+mask_end_idx = np.zeros_like(boundaries)
+for idx_start in sorted_boundary_list:
+    boundary_idx = np.argmax(boundaries == idx_start)
+    idx_end = where_idx_end[boundary_idx]
+    mask_start_idx[boundary_idx] = curr_count
+    curr_count += np.sum(trigs.mask[idx_start:idx_end])
+    mask_end_idx[boundary_idx] = curr_count
 
 if args.veto_file:
     logging.info('Applying DQ vetoes')
-    time = trigf[args.ifo + '/end_time'][:]
-    remove, junk = events.veto.indices_within_segments(time, [args.veto_file],
-                   ifo=args.ifo, segment_name=args.veto_segment_name)
-    # Set stat to zero for triggers being vetoed: given that the fit threshold is
-    # >0 these will not be fitted or plotted. Avoids complications from changing
-    # the number of triggers, ie changes of template boundary.
-    stat[remove] = np.zeros_like(remove)
-    time[remove] = np.zeros_like(remove)
-    logging.info('{} out of {} trigs removed after vetoing with {} from {}'.format(
-        remove.size, stat.size, args.veto_segment_name, args.veto_file))
+    remove, junk = events.veto.indices_within_segments(
+        time,
+        [args.veto_file],
+        ifo=args.ifo,
+        segment_name=args.veto_segment_name
+    )
+    # Set stat to zero for triggers being vetoed: given that the fit threshold
+    # is >0 these will not be fitted or plotted. Avoids complications from
+    # changing the number of triggers, ie changes of template boundary.
+    stat[remove] = 0.
+    time[remove] = 0.
+    logging.info(
+        '%d out of %d trigs removed after vetoing with %s from %s',
+        remove.size,
+        stat.size,
+        args.veto_segment_name,
+        args.veto_file
+    )
 
 if args.gating_veto_windows:
     logging.info('Applying veto to triggers near gates')
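# The running-sum loop above maps each template's [start, end) range in the
# full trigger file onto the masked trigger list; an equivalent cumulative-sum
# formulation may make the bookkeeping easier to see (toy data):
import numpy as np

mask = np.array([1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)  # kept triggers
starts = np.array([0, 3, 5])   # per-template start index in the full file
ends = np.array([3, 5, 8])     # matching end index

csum = np.concatenate(([0], np.cumsum(mask)))
mask_start_idx = csum[starts]  # -> [0 2 3]
mask_end_idx = csum[ends]      # -> [2 3 4]
# Template 0 owns entries 0-1 of the masked list, template 1 owns entry 2, ...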
@@ -236,19 +321,24 @@ if args.gating_veto_windows:
         raise ValueError("Gating veto window values must be negative before "
                          "gates and positive after gates.")
     if not (gveto_before == 0 and gveto_after == 0):
-        time = trigf[args.ifo + '/end_time'][:]
         autogate_times = np.unique(trigf[args.ifo + '/gating/auto/time'][:])
         if args.ifo + '/gating/file' in trigf:
             detgate_times = trigf[args.ifo + '/gating/file/time'][:]
         else:
             detgate_times = []
         gate_times = np.concatenate((autogate_times, detgate_times))
-        gveto_remove = events.veto.indices_within_times(time, gate_times + gveto_before,
-                                                        gate_times + gveto_after)
-        stat[gveto_remove] = np.zeros_like(gveto_remove)
-        time[gveto_remove] = np.zeros_like(gveto_remove)
-        logging.info('{} out of {} trigs removed after vetoing triggers near gates'.format(
-            gveto_remove.size, stat.size))
+        gveto_remove = events.veto.indices_within_times(
+            time,
+            gate_times + gveto_before,
+            gate_times + gveto_after
+        )
+        stat[gveto_remove] = 0.
+        time[gveto_remove] = 0.
+        logging.info(
+            '%d out of %d trigs removed after vetoing triggers near gates',
+            gveto_remove.size,
+            stat.size
+        )
 
 for x in range(args.split_one_nbins):
     if not args.prune_number:
@@ -266,9 +356,8 @@ for x in range(args.split_one_nbins):
         time_inbin = []
         # getting triggers that are in these templates
         for idx in id_in_both:
-            where_idx_start = boundaries[idx]
-            vals_inbin += list(stat[where_idx_start:where_idx_end[idx]])
-            time_inbin += list(time[where_idx_start:where_idx_end[idx]])
+            vals_inbin += list(stat[mask_start_idx[idx]:mask_end_idx[idx]])
+            time_inbin += list(time[mask_start_idx[idx]:mask_end_idx[idx]])
 
         vals_inbin = np.array(vals_inbin)
         time_inbin = np.array(time_inbin)
@@ -276,32 +365,36 @@ for x in range(args.split_one_nbins):
         count_pruned = 0
         logging.info('Pruning in split {}-{} {}-{}'.format(
             args.split_param_one, x, args.split_param_two, y))
+        logging.info('Currently have %d triggers', len(vals_inbin))
         while count_pruned < args.prune_number:
             # Getting loudest statistic value in split
             max_val_arg = vals_inbin.argmax()
             max_statval = vals_inbin[max_val_arg]
-            remove = np.nonzero(abs(time_inbin[max_val_arg] - time)
-                                < args.prune_window)[0]
+            remove = np.nonzero(
+                abs(time_inbin[max_val_arg] - time) < args.prune_window
+            )[0]
             # Remove from inbin triggers as well in case there
             # are more pruning iterations
-            remove_inbin = np.nonzero(abs(time_inbin[max_val_arg] - time_inbin)
-                                      < args.prune_window)[0]
-            logging.info('Prune {}: removing {} triggers around time {:.2f},'
-                         ' {} in this split'.format(count_pruned, remove.size,
-                                                    time[max_val_arg],
-                                                    remove_inbin.size))
+            remove_inbin = np.nonzero(
+                abs(time_inbin[max_val_arg] - time_inbin) < args.prune_window
+            )[0]
+            logging.info(
+                'Prune %d: removing %d triggers around %.2f, %d in this split',
+                count_pruned,
+                remove.size,
+                time[max_val_arg],
+                remove_inbin.size
+            )
             # Set pruned triggers' stat values to zero, as above for vetoes
-            vals_inbin[remove_inbin] = np.zeros_like(remove_inbin)
-            time_inbin[remove_inbin] = np.zeros_like(remove_inbin)
-            stat[remove] = np.zeros_like(remove)
-            time[remove] = np.zeros_like(remove)
+            vals_inbin[remove_inbin] = 0.
+            time_inbin[remove_inbin] = 0.
+            stat[remove] = 0.
+            time[remove] = 0.
             count_pruned += 1
 
-trigf.close()
-
 logging.info('Setting up plotting and fitting limit values')
-minplot = max(stat[np.nonzero(stat)].min(), args.stat_fit_threshold - 1)
+minplot = max(stat[np.nonzero(stat)].min(), args.plot_lower_stat_limit)
 min_fit = max(minplot, args.stat_fit_threshold)
 max_fit = 1.05 * stat.max()
 if args.plot_max_x:
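# Pruning zeroes out everything within prune_window of the current loudest
# trigger so a few loud outliers cannot bias the fit; one toy iteration:
import numpy as np

stat = np.array([6.0, 11.2, 6.5, 7.0])
time = np.array([50.0, 51.0, 52.0, 300.0])
prune_window = 2.0

loudest = stat.argmax()                        # index 1
remove = np.flatnonzero(np.abs(time[loudest] - time) < prune_window)
stat[remove] = 0.                              # zeroed, as for vetoes
time[remove] = 0.
print(stat)                                    # [0. 0. 0. 7.]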
@@ -355,8 +448,7 @@ for x in range(args.split_one_nbins):
         if len(indices_all_conditions) == 0: continue
         vals_inbin = []
         for idx in indices_all_conditions:
-            where_idx_start = boundaries[idx]
-            vals_inbin += list(stat[where_idx_start:where_idx_end[idx]])
+            vals_inbin += list(stat[mask_start_idx[idx]:mask_end_idx[idx]])
 
         vals_inbin = np.array(vals_inbin)
         vals_above_thresh = vals_inbin[vals_inbin >= args.stat_fit_threshold]
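# The pycbc_plot_kde_vals change below swaps an if/elif chain for a dict
# dispatch plus an optional argsort; the pattern in isolation (toy arrays):
import numpy as np

signal_kde = np.array([2.0, 8.0, 4.0])
template_kde = np.array([1.0, 2.0, 4.0])

kde_values = {
    'signal_kde': signal_kde,
    'template_kde': template_kde,
    'ratio_kde': signal_kde / template_kde,
}['ratio_kde']

order = np.argsort(kde_values)       # '--plot-order increasing'
print(kde_values[order])             # [1. 2. 4.]; use order[::-1] to decrease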
diff --git a/bin/all_sky_search/pycbc_plot_kde_vals b/bin/all_sky_search/pycbc_plot_kde_vals
index 54a897041ec..9c8ffde731a 100644
--- a/bin/all_sky_search/pycbc_plot_kde_vals
+++ b/bin/all_sky_search/pycbc_plot_kde_vals
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-import numpy, h5py, argparse, logging
+import numpy, h5py, argparse
 import matplotlib.pyplot as plt
 from matplotlib.colors import LogNorm
 
@@ -15,6 +15,10 @@ parser.add_argument('--log-axis', nargs='+', choices=['True', 'False'], required
                     help='For each parameter, specify True for a log axis and False '
                          'for a linear axis')
 parser.add_argument('--plot-type', choices=['kde_vs_param', 'param_vs_param'])
+parser.add_argument('--plot-order', choices=['file', 'increasing', 'decreasing'],
+                    default='file',
+                    help='Choose the order to plot KDE values in: "file", '
+                         '"increasing" or "decreasing"')
 parser.add_argument('--which-kde', choices=['signal_kde', 'template_kde', 'ratio_kde'])
 parser.add_argument('--plot-dir', required=True)
 parser.add_argument('--verbose', action='count')
@@ -34,20 +38,29 @@ if args.signal_file:
     signal_kde = signal_data['data_kde'][:]
 template_data = h5py.File(args.template_file, 'r')
 template_kde = template_data['data_kde'][:]
-param0 = template_data[args.param[0]][:]
-if len(args.param) > 1:
-    param1 = template_data[args.param[1]][:]
+param_arrays = [template_data[param][:] for param in args.param]
+
+kde_values = {
+    'signal_kde': signal_kde,
+    'template_kde': template_kde,
+    'ratio_kde': signal_kde / template_kde,
+}[args.which_kde]
+
+# Sort each of the parameter arrays
+if args.plot_order == 'increasing':
+    idx = numpy.argsort(kde_values)
+elif args.plot_order == 'decreasing':
+    idx = numpy.argsort(kde_values)[::-1]
+else:
+    idx = numpy.arange(len(kde_values))
+param_arrays = [param[idx] for param in param_arrays]
+param0 = param_arrays[0]
+param1 = param_arrays[1] if len(param_arrays) > 1 else None
+kde_values = kde_values[idx]
 
 if args.plot_type == 'kde_vs_param':
     fig, ax = plt.subplots(1, figsize=(12,7), constrained_layout=True)
-    if args.which_kde == 'signal_kde':
-        im = ax.scatter(signal_kde, param0, marker=".", c="r", s=5)
-    elif args.which_kde == 'template_kde':
-        im = ax.scatter(template_kde, param0, marker=".", c="r", s=5)
-    elif args.which_kde == 'ratio_kde':
-        im = ax.scatter(signal_kde / template_kde, param0, marker=".", c="r", s=5)
-    else:
-        raise RuntimeError('Unknown value of which_kde', args.which_kde)
+    im = ax.scatter(kde_values, param0, marker=".", c="r", s=5)
     ax.set_xticklabels(args.which_kde, fontsize=13)
     ax.set_yticklabels(args.param[0], fontsize=13)
     ax.set_xlabel(args.which_kde, fontsize=15)
@@ -62,18 +75,8 @@ if args.plot_type == 'kde_vs_param':
 elif args.plot_type == 'param_vs_param':
     fig, ax = plt.subplots(1, figsize=(12,7), constrained_layout=True)
-    if args.which_kde == 'signal_kde':
-        im = ax.scatter(param0, param1, marker=".", c=signal_kde, cmap='plasma',
-                        s=5, norm=LogNorm())
-    elif args.which_kde == 'template_kde':
-        im = ax.scatter(param0, param1, marker=".", c=template_kde, cmap='plasma',
-                        s=5, norm=LogNorm())
-    elif args.which_kde == 'ratio_kde':
-        im = ax.scatter(param0, param1, marker=".", c=signal_kde / template_kde,
-                        cmap='RdBu_r', s=5, norm=LogNorm())
-    else:
-        raise RuntimeError('Unknown value of which_kde', args.which_kde)
-    cbar = fig.colorbar(im, ax=ax)
+    im = ax.scatter(param0, param1, marker=".", c=kde_values, cmap='turbo', s=5, norm=LogNorm())
+    cbar = fig.colorbar(im, ax=ax, pad=0.01)
     ax.set_xticklabels(args.param[0], fontsize=13)
     ax.set_yticklabels(args.param[1], fontsize=13)
     ax.set_xlabel(args.param[0], fontsize=15)
@@ -86,8 +89,9 @@ elif args.plot_type == 'param_vs_param':
         ax.set_yscale('log')
     else:
         ax.set_yscale('linear')
-    cbar.ax.set_ylabel(args.which_kde, rotation=270, fontsize=15)
-    plot_loc = args.plot_dir + args.which_kde + '_' + args.param[0] + '_vs_' + args.param[1] + '.png'
+    cbar.ax.set_ylabel(args.which_kde, rotation=270, fontsize=15, labelpad=15)
+    cbar.ax.tick_params(labelsize=15)
+    plot_loc = f'{args.plot_dir}/{args.which_kde}_{args.plot_order}_{args.param[0]}_vs_{args.param[1]}.png'
     plt.savefig(plot_loc)
 
 else:
diff --git a/bin/all_sky_search/pycbc_sngls_statmap b/bin/all_sky_search/pycbc_sngls_statmap
index 9ed977c5ac9..e3c2f03ebdb 100755
--- a/bin/all_sky_search/pycbc_sngls_statmap
+++ b/bin/all_sky_search/pycbc_sngls_statmap
@@ -146,8 +146,16 @@ bg_far, fg_far = significance.get_far(
     fg_time,
     **significance_dict[ifo])
 
-fg_far = significance.apply_far_limit(fg_far, significance_dict, combo=ifo)
-bg_far = significance.apply_far_limit(bg_far, significance_dict, combo=ifo)
+fg_far = significance.apply_far_limit(
+    fg_far,
+    significance_dict,
+    combo=ifo,
+)
+bg_far = significance.apply_far_limit(
+    bg_far,
+    significance_dict,
+    combo=ifo,
+)
 
 bg_ifar = 1. / bg_far
 fg_ifar = 1. / fg_far
@@ -341,6 +349,18 @@ while numpy.any(ifar_louder > hier_ifar_thresh_s):
         fg_time,
         **significance_dict[ifo])
 
+    fg_far = significance.apply_far_limit(
+        fg_far,
+        significance_dict,
+        combo=ifo,
+    )
+
+    bg_far = significance.apply_far_limit(
+        bg_far,
+        significance_dict,
+        combo=ifo,
+    )
+
     bg_ifar = 1. / bg_far
     fg_ifar = 1. / fg_far
 
@@ -359,6 +379,12 @@ while numpy.any(ifar_louder > hier_ifar_thresh_s):
         fg_time_exc,
         **significance_dict[ifo])
 
+    fg_far_exc = significance.apply_far_limit(
+        fg_far_exc,
+        significance_dict,
+        combo=ifo,
+    )
+
     fg_ifar_exc = 1. / fg_far_exc
 
     ifar_louder = fg_ifar_exc
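# The fap = 1 - exp(-T / ifar) conversion recurs throughout these statmap
# executables; a quick numeric sanity check (times in years, illustrative):
import numpy as np

coinc_time_yr = 0.5
ifar_yr = np.array([1e-2, 1.0, 100.0])
fap = 1 - np.exp(-coinc_time_yr / ifar_yr)
print(fap)   # ~[1.0, 0.39, 0.005]: rarer events get smaller false-alarm prob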
diff --git a/bin/all_sky_search/pycbc_sngls_statmap_inj b/bin/all_sky_search/pycbc_sngls_statmap_inj
index a75cfee613b..c7ba970d7c4 100644
--- a/bin/all_sky_search/pycbc_sngls_statmap_inj
+++ b/bin/all_sky_search/pycbc_sngls_statmap_inj
@@ -117,8 +117,14 @@ bg_far_exc, fg_far_exc = significance.get_far(
     fg_time_exc,
     **significance_dict[ifo])
 
-fg_far_exc = significance.apply_far_limit(fg_far_exc, significance_dict, combo=ifo)
-bg_far_exc = significance.apply_far_limit(bg_far_exc, significance_dict, combo=ifo)
+fg_far_exc = significance.apply_far_limit(
+    fg_far_exc,
+    significance_dict,
+    combo=ifo)
+bg_far_exc = significance.apply_far_limit(
+    bg_far_exc,
+    significance_dict,
+    combo=ifo)
 
 fg_ifar_exc = 1. / fg_far_exc
 bg_ifar_exc = 1. / bg_far_exc
diff --git a/bin/bank/pycbc_aligned_stoch_bank b/bin/bank/pycbc_aligned_stoch_bank
index 7cc373ce68c..9b9a473d3fa 100644
--- a/bin/bank/pycbc_aligned_stoch_bank
+++ b/bin/bank/pycbc_aligned_stoch_bank
@@ -173,7 +173,7 @@ metricParams = tmpltbank.determine_eigen_directions(
     vary_fmax=(opts.vary_fupper or ethincaParams.doEthinca),
     vary_density=freqStep)
 
-logging.info("Identify limits of frequency.")
+logging.info("Identifying limits of frequency")
 
 # Choose the frequency values to use for metric calculation
 if opts.vary_fupper==False:
@@ -189,8 +189,11 @@ else:
     # total masses
     fs = numpy.array(list(metricParams.evals.keys()), dtype=float)
     fs.sort()
-    lowEve, highEve = tmpltbank.find_max_and_min_frequencies(\
-        opts.bank_fupper_formula, massRangeParams, fs)
+    lowEve, highEve = tmpltbank.find_max_and_min_frequencies(
+        opts.bank_fupper_formula,
+        massRangeParams,
+        fs
+    )
     refFreq = lowEve
     fs = fs[fs >= lowEve]
     fs = fs[fs <= highEve]
@@ -205,12 +208,15 @@ evecsCVdict = {}
 evecsCVdict[refFreq] = evecsCV
 metricParams.evecsCV = evecsCVdict
 
-# Initialize the class for generating the partitioned bank
 logging.info("Initialize the PartitionedTmpltbank class")
-partitioned_bank_object = tmpltbank.PartitionedTmpltbank(massRangeParams,
-    metricParams, refFreq, (opts.max_mismatch)**0.5,
-    bin_range_check=1)
+partitioned_bank_object = tmpltbank.PartitionedTmpltbank(
+    massRangeParams,
+    metricParams,
+    refFreq,
+    opts.max_mismatch ** 0.5,
+    bin_range_check=1
+)
 
 # Initialise counters
 N = 0
@@ -231,11 +237,12 @@ while True:
     rMass1, rMass2, rSpin1z, rSpin2z = \
         tmpltbank.get_random_mass(100000, massRangeParams)
     if opts.vary_fupper:
-        mass_dict = {}
-        mass_dict['m1'] = rMass1
-        mass_dict['m2'] = rMass2
-        mass_dict['s1z'] = rSpin1z
-        mass_dict['s2z'] = rSpin2z
+        mass_dict = {
+            'mass1': rMass1,
+            'mass2': rMass2,
+            'spin1z': rSpin1z,
+            'spin2z': rSpin2z
+        }
         refEve = tmpltbank.return_nearest_cutoff(
            opts.bank_fupper_formula, mass_dict, fs)
         lambdas = tmpltbank.get_chirp_params(rMass1, rMass2, rSpin1z,
diff --git a/bin/bank/pycbc_brute_bank b/bin/bank/pycbc_brute_bank
index f69b43da731..ef07ef06770 100644
--- a/bin/bank/pycbc_brute_bank
+++ b/bin/bank/pycbc_brute_bank
@@ -22,6 +22,7 @@ import numpy, h5py, logging, argparse, numpy.random
 import pycbc.waveform, pycbc.filter, pycbc.types, pycbc.psd, pycbc.fft, pycbc.conversions
 from pycbc import transforms
+from pycbc.waveform.spa_tmplt import spa_length_in_time
 from pycbc.distributions import read_params_from_config
 from pycbc.distributions.utils import draw_samples_from_config, prior_from_config
 from scipy.stats import gaussian_kde
@@ -45,6 +46,8 @@ parser.add_argument('--approximant', required=True,
 parser.add_argument('--minimal-match', default=0.97, type=float)
 parser.add_argument('--buffer-length', default=2, type=float,
                     help='size of waveform buffer in seconds')
+parser.add_argument('--max-signal-length', type=float,
+                    help="When specified, cut the waveform to at most this length in seconds")
 parser.add_argument('--sample-rate', default=2048, type=float,
                     help='sample rate in seconds')
 parser.add_argument('--low-frequency-cutoff', default=20.0, type=float)
@@ -269,15 +272,28 @@ class GenUniformWaveform(object):
             self.md = q._data[-100:]
             self.md2 = q._data[0:100]
 
-    def generate(self, **kwds):
+    def generate(self, **kwds):
         kwds.update(fdict)
-        if kwds['approximant'] in pycbc.waveform.fd_approximants():
-            ws = pycbc.waveform.get_fd_waveform(delta_f=self.delta_f,
-                                                f_lower=self.f_lower, **kwds)
-            hp = ws[0]
-            hc = ws[1]
+        if args.max_signal_length is not None:
+            flow = numpy.arange(self.f_lower, 100, .1)[::-1]
+            length = spa_length_in_time(mass1=kwds['mass1'], mass2=kwds['mass2'],
+                                        f_lower=flow, phase_order=-1)
+            maxlen = args.max_signal_length
+            x = numpy.searchsorted(length, maxlen) - 1
+            l = length[x]
+            f = flow[x]
+        else:
+            f = self.f_lower
+
+        kwds['f_lower'] = f
+
+        if kwds['approximant'] in pycbc.waveform.fd_approximants():
+            hp, hc = pycbc.waveform.get_fd_waveform(delta_f=self.delta_f,
+                                                    f_ref=10.0, **kwds)
+
             if 'fratio' in kwds:
                 hp = hc * kwds['fratio'] + hp * (1 - kwds['fratio'])
+
         else:
             dt = 1.0 / args.sample_rate
             hp = pycbc.waveform.get_waveform_filter(
@@ -342,10 +358,10 @@ def draw(rtype):
     p = bank.keys()
     p = [k for k in p if k not in fdict]
     p.remove('approximant')
+    p.remove('f_lower')
     if args.input_config is not None:
         p = variable_args
     bdata = numpy.array([bank.key(k)[-trail:] for k in p])
-
     kde = gaussian_kde(bdata)
     points = kde.resample(size=size)
     params = {k: v for k, v in zip(p, points)}
@@ -422,9 +438,10 @@ def cdraw(rtype, ts, te):
             return None
 
     return p
-
+
 tau0s = args.tau0_start
 tau0e = tau0s + args.tau0_crawl
+
 go = True
 
 region = 0
@@ -447,6 +464,7 @@ while tau0s < args.tau0_end:
             if r > 10:
                 conv = uconv
         kloop = 0
+
         while ((kloop == 0) or (kconv / okconv) > .5) and len(bank) > 10:
             r += 1
             kloop += 1
@@ -455,9 +473,11 @@ while tau0s < args.tau0_end:
             bank, kconv = bank.check_params(gen, params, args.minimal_match)
             logging.info("%s: Round (K) (%s): %s Size: %s conv: %s added: %s",
                          region, kloop, r, len(bank), kconv, len(bank) - blen)
+
+
             if uconv:
                 logging.info('Ratio of convergences: %2.3f' % (kconv / (uconv)))
-                logging.info('Progress: {:.0%} completed'.format(tau0s/args.tau0_end))
+                logging.info('Progress: {:.0%} completed'.format(tau0e/args.tau0_end))
 
         if kloop == 1:
             okconv = kconv
@@ -473,9 +493,9 @@ while tau0s < args.tau0_end:
         tau0e += args.tau0_crawl / 2
 
 o = h5py.File(args.output_file, 'w')
+
 for k in bank.keys():
     val = bank.key(k)
     if val.dtype.char == 'U':
         val = val.astype('bytes')
     o[k] = val
-o['f_lower'] = numpy.ones(len(val)) * args.low_frequency_cutoff
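# The --max-signal-length logic above scans a descending grid of start
# frequencies and keeps the lowest one whose Newtonian-order duration fits
# under the cap; the same search as a standalone helper (illustrative masses,
# and it assumes the cap is reachable on the grid):
import numpy
from pycbc.waveform.spa_tmplt import spa_length_in_time

def flow_for_max_length(mass1, mass2, f_min, max_len):
    flow = numpy.arange(f_min, 100, 0.1)[::-1]   # descending frequency grid
    length = spa_length_in_time(mass1=mass1, mass2=mass2,
                                f_lower=flow, phase_order=-1)
    # durations grow as the start frequency falls, so `length` is ascending
    x = numpy.searchsorted(length, max_len) - 1
    return flow[x]

print(flow_for_max_length(1.4, 1.4, 20.0, 64.0))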
diff --git a/bin/minifollowups/pycbc_foreground_minifollowup b/bin/minifollowups/pycbc_foreground_minifollowup
index 41b3f015887..4d9a19d277e 100644
--- a/bin/minifollowups/pycbc_foreground_minifollowup
+++ b/bin/minifollowups/pycbc_foreground_minifollowup
@@ -103,10 +103,10 @@ file_val = args.analysis_category
 stat = f['{}/stat'.format(file_val)][:]
 
 if args.sort_variable not in f[file_val]:
-    all_datasets = [re.sub(file_val, '', ds).strip('/') for ds in get_all_subkeys(f, file_val)]
-    raise KeyError('Sort variable {0} not in {1}: sort choices in '
-                   '{1} are {2}'.format(args.sort_variable, file_val,
-                                        ', '.join(all_datasets)))
+    all_datasets = [re.sub(file_val, '', ds).strip('/')
+                    for ds in get_all_subkeys(f, file_val)]
+    raise KeyError(f'Sort variable {args.sort_variable} not in {file_val}: '
+                   f'sort choices in {file_val} are '
+                   + ', '.join(all_datasets))
 
 events_to_read = num_events * 100
 
@@ -126,8 +126,8 @@ tids = {}
 # Version used for multi-ifo coinc code
 ifo_list = f.attrs['ifos'].split(' ')
 for ifo in ifo_list:
-    times[ifo] = f['{}/{}/time'.format(file_val,ifo)][:][event_idx]
-    tids[ifo] = f['{}/{}/trigger_id'.format(file_val, ifo)][:][event_idx]
+    times[ifo] = f[f'{file_val}/{ifo}/time'][:][event_idx]
+    tids[ifo] = f[f'{file_val}/{ifo}/trigger_id'][:][event_idx]
 
 bank_data = h5py.File(args.bank_file, 'r')
 
@@ -139,21 +139,30 @@ curr_idx = -1
 while event_count < num_events and curr_idx < (events_to_read - 1):
     curr_idx += 1
     files = wf.FileList([])
+
+    duplicate = False
+
     # Times and trig ids for trigger timeseries plot
     ifo_times_strings = []
     ifo_tids_strings = []
-    duplicate = False
-    for ifo in times:
-        ifo_times_string = '%s:%s' % (ifo, times[ifo][curr_idx])
-        ifo_tids_string = '%s:%s' % (ifo, tids[ifo][curr_idx])
-        ifo_times_strings += [ifo_times_string]
-        ifo_tids_strings += [ifo_tids_string]
 
-        # For background do not want to follow up 10 background coincs with
-        # the same event in ifo 1 and different events in ifo 2
+    # Mean time to use as reference for zerolag
+    mtime = coinc.mean_if_greater_than_zero(
+        [times[ifo][curr_idx] for ifo in times])[0]
+
+    for ifo in times:
+        plottime = times[ifo][curr_idx]
+        if plottime == -1:  # Ifo is not in coinc
+            # Use mean time instead and don't plot special trigger
+            plottime = mtime
+        else:  # Do plot special trigger
+            ifo_tids_strings += ['%s:%s' % (ifo, tids[ifo][curr_idx])]
+        ifo_times_strings += ['%s:%s' % (ifo, plottime)]
+
+        # For background do not want to follow up 10 background coincs with the
+        # same event in ifo 1 and different events in ifo 2, so record times
         if ifo not in event_times:
             event_times[ifo] = []
-        # Don't skip coincs in zerolag or due to the sentinel time -1
+        # Only do this for background coincident triggers & not sentinel time -1
         if 'background' in args.analysis_category and \
                 times[ifo][curr_idx] != -1 and \
                 int(times[ifo][curr_idx]) in event_times[ifo]:
@@ -175,10 +184,10 @@ while event_count < num_events and curr_idx < (events_to_read - 1):
                 workflow,
                 skipped_data,
                 args.output_dir,
-                tags=['SKIP_{}'.format(event_count)]),)
+                tags=[f'SKIP_{event_count}']),)
             skipped_data = []
 
-    bank_id = f['{}/template_id'.format(file_val)][:][sorting][curr_idx]
+    bank_id = f[f'{file_val}/template_id'][:][sorting][curr_idx]
 
     layouts += (mini.make_coinc_info(workflow, single_triggers, tmpltbank_file,
                                      coinc_file, args.output_dir,
                                      n_loudest=curr_idx,
@@ -193,7 +202,7 @@ while event_count < num_events and curr_idx < (events_to_read - 1):
         params['%s_end_time' % ifo] = times[ifo][curr_idx]
         try:
             # Only present for precessing case
-            params['u_vals_%s'%ifo] = \
+            params['u_vals_%s' % ifo] = \
                 fsdt[ifo][ifo]['u_vals'][tids[ifo][curr_idx]]
         except:
             pass
@@ -236,8 +245,8 @@ while event_count < num_events and curr_idx < (events_to_read - 1):
                                            tags=args.tags + [str(event_count)])
             break
         else:
-            logging.info('Trigger time {} is not valid in {}, ' \
-                         'skipping singles plots'.format(time, single.ifo))
+            logging.info(f'Trigger time {time} is not valid in '
+                         f'{single.ifo}, skipping singles plots')
 
 layouts += list(layout.grouper(files, 2))
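# The minifollowup loop above substitutes a mean reference time whenever an
# ifo carries the sentinel value -1 (i.e. it is not part of the coinc); the
# substitution in plain numpy (hypothetical times dict):
import numpy as np

times = {'H1': 1187008882.4, 'L1': -1, 'V1': 1187008882.5}
valid = np.array([t for t in times.values() if t > 0])
mtime = valid.mean()                 # reference time for absent detectors
plot_times = {ifo: (t if t > 0 else mtime) for ifo, t in times.items()}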
% (args.n_loudest + 1, stat_name) + caption = 'Parameters of the single-detector event ranked number %s by %s. The figures below show the mini-followup data for this event.' % (args.n_loudest + 1, stat_name_long) else: title = 'Parameters of single-detector event' caption = 'Parameters of the single-detector event. The figures below show the mini-followup data for this event.' diff --git a/bin/minifollowups/pycbc_plot_trigger_timeseries b/bin/minifollowups/pycbc_plot_trigger_timeseries index 78f1e562d4d..ef9829dab01 100644 --- a/bin/minifollowups/pycbc_plot_trigger_timeseries +++ b/bin/minifollowups/pycbc_plot_trigger_timeseries @@ -14,6 +14,7 @@ # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + """ Plot the single detector trigger timeseries """ import argparse, logging, pycbc.version, pycbc.results, sys from pycbc.types import MultiDetOptionAction @@ -91,25 +92,17 @@ for ifo in args.single_trigger_files.keys(): logging.info("Getting %s", args.plot_type) rank = ranking.get_sngls_ranking_from_trigs(trigs, args.plot_type) - if all(rank == 1): - # This is the default value to say "downranked beyond consideration" - # so skip these events - pylab.scatter(-2 * args.window, 0, - color=pycbc.results.ifo_color(ifo), - marker='x', - label=ifo) - continue - pylab.scatter(trigs['end_time'] - t, rank, color=pycbc.results.ifo_color(ifo), marker='x', label=ifo) - # Minimum rank which hasn't been set to the default of 1 - min_rank = min(min_rank, rank[rank > 1].min()) + min_rank = min(min_rank, rank.min()) if args.special_trigger_ids: special_idx = args.special_trigger_ids[ifo] - if special_idx not in idx: + if special_idx == None: # No special trigger for this ifo + continue + elif special_idx not in idx: logging.info("IDX %d not in kept list", args.special_trigger_ids[ifo]) continue @@ -135,7 +128,10 @@ logging.info("Saving figure") pycbc.results.save_fig_with_metadata(fig, args.output_file, cmd = ' '.join(sys.argv), title = 'Single Detector Trigger Timeseries (%s)' % args.plot_type, - caption = 'Time series showing the single detector triggers' - ' centered around the time of the trigger of interest.', + caption = 'Time series showing the single-detector triggers ' + 'centered around the time of the trigger of interest. ' + 'Triggers with ranking 1 have been downweighted beyond ' + 'consideration, but may still form insignificant ' + 'events.', ) logging.info("Done!") diff --git a/bin/minifollowups/pycbc_sngl_minifollowup b/bin/minifollowups/pycbc_sngl_minifollowup index bd3ae4bed1a..01fbc743e83 100644 --- a/bin/minifollowups/pycbc_sngl_minifollowup +++ b/bin/minifollowups/pycbc_sngl_minifollowup @@ -14,29 +14,24 @@ # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
diff --git a/bin/minifollowups/pycbc_sngl_minifollowup b/bin/minifollowups/pycbc_sngl_minifollowup
index bd3ae4bed1a..01fbc743e83 100644
--- a/bin/minifollowups/pycbc_sngl_minifollowup
+++ b/bin/minifollowups/pycbc_sngl_minifollowup
@@ -14,29 +14,24 @@
 # You should have received a copy of the GNU General Public License along
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-""" Followup foreground events
+"""
+Followup single-detector triggers which do not contribute to foreground events
 """
 import os, argparse, logging
 import numpy
 from ligo.lw import table
 from ligo.lw import utils as ligolw_utils
 from pycbc.results import layout
+from pycbc.types.optparse import MultiDetOptionAction
 from pycbc.events import select_segments_by_definer
 import pycbc.workflow.minifollowups as mini
 import pycbc.version
 import pycbc.workflow as wf
 import pycbc.events
 from pycbc.workflow.core import resolve_url_to_file
-from pycbc.events import stat
+from pycbc.events import stat, veto, coinc
 from pycbc.io import hdf
 
-def add_wiki_row(outfile, cols):
-    """
-    Adds a wiki-formatted row to an output file from a list or a numpy array.
-    """
-    with open(outfile, 'a') as f:
-        f.write('||%s||\n' % '||'.join(map(str,cols)))
-
 parser = argparse.ArgumentParser(description=__doc__[1:])
 parser.add_argument('--version', action='version', version=pycbc.version.git_verbose_msg)
 parser.add_argument('--bank-file',
@@ -44,11 +39,23 @@ parser.add_argument('--bank-file',
 parser.add_argument('--single-detector-file',
                     help="HDF format merged single detector trigger files")
 parser.add_argument('--instrument', help="Name of interferometer e.g. H1")
+parser.add_argument('--foreground-censor-file',
+                    help="The censor file to be used if vetoing triggers "
+                         "in the foreground of the search (optional).")
+parser.add_argument('--foreground-segment-name',
+                    help="If using foreground censor file must also provide "
+                         "the name of the segment to use as a veto.")
 parser.add_argument('--veto-file',
-    help="The veto file to be used if vetoing triggers (optional).")
+                    help="The veto file to be used if vetoing triggers "
+                         "(optional).")
 parser.add_argument('--veto-segment-name',
-    help="If using veto file must also provide the name of the segment to use "
-    "as a veto.")
+                    help="If using veto file must also provide the name of "
+                         "the segment to use as a veto.")
+parser.add_argument("--gating-veto-windows", nargs='+',
+                    action=MultiDetOptionAction,
+                    help="Seconds to be vetoed before and after the central time "
+                         "of each gate. Given as detector-values pairs, e.g. "
+                         "H1:-1,2.5 L1:-1,2.5 V1:0,0")
 parser.add_argument('--inspiral-segments',
                     help="xml segment file containing the inspiral analysis "
                          "times")
@@ -60,17 +67,22 @@ parser.add_argument('--inspiral-data-analyzed-name',
                          "analyzed by each analysis job.")
 parser.add_argument('--min-snr', type=float, default=6.5,
                     help="Minimum SNR to consider for loudest triggers")
-parser.add_argument('--non-coinc-time-only', default=False,
-                    action='store_true',
+parser.add_argument('--non-coinc-time-only', action='store_true',
                     help="If given remove (veto) single-detector triggers "
                          "that occur during a time when at least one other "
                          "instrument is taking science data.")
+parser.add_argument('--vetoed-time-only', action='store_true',
+                    help="If given, only report on single-detector triggers "
+                         "that occur during vetoed times.")
 parser.add_argument('--minimum-duration', default=None, type=float,
                     help="If given only consider single-detector triggers "
                          "with template duration larger than this.")
 parser.add_argument('--maximum-duration', default=None, type=float,
                     help="If given only consider single-detector triggers "
                          "with template duration smaller than this.")
+parser.add_argument('--cluster-window', type=float, default=10,
+                    help="Window (seconds) over which to cluster triggers "
+                         "when finding the loudest-ranked. Default=10")
 wf.add_workflow_command_line_group(parser)
 wf.add_workflow_settings_cli(parser, include_subdax_opts=True)
 stat.insert_statistic_option_group(parser,
@@ -95,6 +107,12 @@ sngl_file = resolve_url_to_file(
     attrs={'ifos': args.instrument}
 )
 
+# Flatten the statistic_files option:
+statfiles = []
+for f in sum(args.statistic_files, []):
+    statfiles.append(resolve_url_to_file(os.path.abspath(f)))
+statfiles = wf.FileList(statfiles) if statfiles else None
+
 if args.veto_file is not None:
     veto_file = resolve_url_to_file(
         os.path.abspath(args.veto_file),
@@ -102,6 +120,7 @@ if args.veto_file is not None:
     )
 else:
     veto_file = None
+
 insp_segs = resolve_url_to_file(os.path.abspath(args.inspiral_segments))
 insp_data_seglists = select_segments_by_definer\
     (args.inspiral_segments, segment_name=args.inspiral_data_read_name,
@@ -112,20 +131,89 @@ num_events = int(workflow.cp.get_opt_tags('workflow-sngl_minifollowups',
                  'num-sngl-events', ''))
 
 # This helps speed up the processing to ignore a large fraction of triggers
-snr_mask = None
+mask = None
+f = hdf.HFile(args.single_detector_file, 'r')
+n_triggers = f['{}/snr'.format(args.instrument)].size
+logging.info("%i triggers in file", n_triggers)
 if args.min_snr:
     logging.info('Calculating Prefilter')
-    f = hdf.HFile(args.single_detector_file, 'r')
     idx, _ = f.select(lambda snr: snr > args.min_snr,
                       '{}/snr'.format(args.instrument),
                       return_indices=True)
-    snr_mask = numpy.zeros(len(f['{}/snr'.format(args.instrument)]),
-                           dtype=bool)
-    snr_mask[idx] = True
+    mask = numpy.zeros(n_triggers, dtype=bool)
+    mask[idx] = True
+    if len(idx) < num_events:
+        logging.info("Fewer triggers exist after the --min-snr cut (%d) "
+                     "than requested for the minifollowup (%d)",
+                     len(idx), num_events)
 
-trigs = hdf.SingleDetTriggers(args.single_detector_file, args.bank_file,
-                              args.veto_file, args.veto_segment_name,
-                              None, args.instrument, premask=snr_mask)
+trigs = hdf.SingleDetTriggers(
+    args.single_detector_file,
+    args.bank_file,
+    args.foreground_censor_file,
+    args.foreground_segment_name,
+    None,
+    args.instrument,
+    premask=mask
+)
+
+# Include gating vetoes
+if args.gating_veto_windows:
+    logging.info("Getting gating vetoes")
+    gating_veto = args.gating_veto_windows[args.instrument].split(',')
+    gveto_before = float(gating_veto[0])
+    gveto_after = float(gating_veto[1])
+    if gveto_before > 0 or gveto_after < 0:
+        raise ValueError("Gating veto window values must be negative before "
+                         "gates and positive after gates.")
+    if not (gveto_before == 0 and gveto_after == 0):
+        gate_group = f[args.instrument + '/gating/']
+        autogate_times = numpy.unique(gate_group['auto/time'][:])
+        if 'file' in gate_group:
+            detgate_times = gate_group['file/time'][:]
+        else:
+            detgate_times = []
+        gate_times = numpy.concatenate((autogate_times, detgate_times))
+        gveto_idx = veto.indices_within_times(
+            trigs.end_time,
+            gate_times + gveto_before,
+            gate_times + gveto_after
+        )
+        logging.info('%i triggers in gating vetoes', gveto_idx.size)
+else:
+    gveto_idx = numpy.array([], dtype=numpy.uint64)
+
+if args.veto_file:
+    logging.info('Getting file vetoes')
+    # veto_mask is an array of indices into the trigger arrays
+    # giving the surviving triggers
+    veto_file_idx, _ = veto.indices_within_segments(
+        trigs.end_time,
+        [args.veto_file],
+        ifo=args.instrument,
+        segment_name=args.veto_segment_name
+    )
+
+    logging.info('%i triggers in file-vetoed segments',
+                 veto_file_idx.size)
+else:
+    veto_file_idx = numpy.array([], dtype=numpy.uint64)
+# Work out indices we are going to keep / remove
+vetoed_idx = numpy.unique(numpy.concatenate((veto_file_idx, gveto_idx)))
+# Needs to be in ascending order
+vetoed_idx = numpy.sort(vetoed_idx).astype(numpy.uint64)
+
+if args.vetoed_time_only and vetoed_idx.size > 0:
+    logging.info("Applying mask to keep only triggers within vetoed time")
+    trigs.apply_mask(vetoed_idx)
+elif vetoed_idx.size > 0:
+    logging.info("Applying mask to keep only triggers outside vetoed time")
+    veto_mask = numpy.ones(trigs.end_time.size, dtype=bool)
+    veto_mask[vetoed_idx] = False
+    trigs.apply_mask(veto_mask)
+elif args.vetoed_time_only and vetoed_idx.size == 0:
+    logging.warning("No triggers exist inside vetoed times")
 
 if args.non_coinc_time_only:
     from pycbc.io.ligolw import LIGOLWContentHandler as h
@@ -158,29 +246,56 @@ if args.maximum_duration is not None:
     trigs.apply_mask(lgc_mask)
     logging.info('remaining triggers: %s', trigs.mask.sum())
 
-logging.info('finding loudest clustered events')
+logging.info('Finding loudest clustered events')
 rank_method = stat.get_statistic_from_opts(args, [args.instrument])
-trigs.mask_to_n_loudest_clustered_events(rank_method, n_loudest=num_events)
 
-if len(trigs.stat) < num_events:
-    num_events = len(trigs.stat)
+extra_kwargs = {}
+for inputstr in args.statistic_keywords:
+    try:
+        key, value = inputstr.split(':')
+        extra_kwargs[key] = value
+    except ValueError:
+        err_txt = "--statistic-keywords must take input in the " \
+                  "form KWARG1:VALUE1 KWARG2:VALUE2 KWARG3:VALUE3 ... " \
+                  "Received {}".format(args.statistic_keywords)
+        raise ValueError(err_txt)
+
+logging.info("Calculating statistic for %d triggers", len(trigs.snr))
+sds = rank_method.single(trigs)
+stat = rank_method.rank_stat_single((args.instrument, sds), **extra_kwargs)
+logging.info("Clustering events over %.3fs window", args.cluster_window)
+cid = coinc.cluster_over_time(stat, trigs.end_time,
+                              args.cluster_window)
+trigs.apply_mask(cid)
+stat = stat[cid]
+
+if len(trigs.snr) < num_events:
+    num_events = len(trigs.snr)
+
+logging.info("Finding the loudest triggers")
+loudest_idx = sorted(numpy.argsort(stat)[::-1][:num_events])
+trigs.apply_mask(loudest_idx)
+stat = stat[loudest_idx]
 
 times = trigs.end_time
 tids = trigs.template_id
 
 # loop over number of loudest events to be followed up
-order = trigs.stat.argsort()[::-1]
+order = stat.argsort()[::-1]
 for rank, num_event in enumerate(order):
     logging.info('Processing event: %s', num_event)
     files = wf.FileList([])
     time = times[num_event]
     ifo_time = '%s:%s' %(args.instrument, str(time))
-    tid = trigs.mask[num_event]
+    if isinstance(trigs.mask, numpy.ndarray) and trigs.mask.dtype == bool:
+        tid = numpy.flatnonzero(trigs.mask)[num_event]
+    else:
+        tid = trigs.mask[num_event]
     ifo_tid = '%s:%s' %(args.instrument, str(tid))
 
     layouts += (mini.make_sngl_ifo(workflow, sngl_file, tmpltbank_file,
                                    tid, args.output_dir, args.instrument,
+                                   statfiles=statfiles,
                                    tags=args.tags + [str(rank)]),)
     files += mini.make_trigger_timeseries(workflow, [sngl_file],
                                           ifo_time, args.output_dir, special_tids=ifo_tid,
@@ -217,7 +332,6 @@ for rank, num_event in enumerate(order):
                                           args.output_dir, [args.instrument],
                                           tags=args.tags + [str(rank)])
 
-
     files += mini.make_singles_timefreq(workflow, sngl_file, tmpltbank_file,
                                         time, args.output_dir,
                                         data_segments=insp_data_seglists,
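# cluster_over_time keeps only the loudest trigger within each window; a rough
# pure-numpy equivalent for intuition (O(n^2) toy, not the library routine
# used above):
import numpy as np

def cluster_loudest(stat, time, window):
    keep = []
    for i in range(len(stat)):
        nearby = np.abs(time - time[i]) <= window
        if stat[i] >= stat[nearby].max():
            keep.append(i)
    return np.array(keep)

stat = np.array([5.0, 9.0, 6.0, 8.0])
time = np.array([100.0, 101.0, 102.0, 200.0])
print(cluster_loudest(stat, time, 10.0))   # -> [1 3]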
diff --git a/bin/plotting/pycbc_plot_singles_vs_params b/bin/plotting/pycbc_plot_singles_vs_params
index 8a76a677826..c834037a30d 100644
--- a/bin/plotting/pycbc_plot_singles_vs_params
+++ b/bin/plotting/pycbc_plot_singles_vs_params
@@ -84,12 +84,29 @@ opts = parser.parse_args()
 
 logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
 
-snr_filter = '(self.snr>%f)' % (opts.min_snr) if opts.min_snr > 0. else None
-filts = [f for f in [snr_filter, opts.filter_string] if f is not None]
-filter_func = ' & '.join(filts) if filts else None
-
-trigs = pycbc.io.SingleDetTriggers(opts.single_trig_file, opts.bank_file,
-                                   opts.veto_file, opts.segment_name, filter_func, opts.detector)
+data_mask = None
+if opts.min_snr > 0:
+    with pycbc.io.HFile(opts.single_trig_file, 'r') as trig_file:
+        n_triggers_orig = trig_file[f'{opts.detector}/snr'].size
+        logging.info("Trigger file has %d triggers", n_triggers_orig)
+        logging.info('Generating trigger mask (on SNR)')
+        idx, _ = trig_file.select(
+            lambda snr: snr >= opts.min_snr,
+            f'{opts.detector}/snr',
+            return_indices=True
+        )
+        data_mask = np.zeros(n_triggers_orig, dtype=bool)
+        data_mask[idx] = True
+
+trigs = pycbc.io.SingleDetTriggers(
+    opts.single_trig_file,
+    opts.bank_file,
+    opts.veto_file,
+    opts.segment_name,
+    opts.filter_string,
+    opts.detector,
+    premask=data_mask
+)
 
 x = getattr(trigs, opts.x_var)
 y = getattr(trigs, opts.y_var)
@@ -108,10 +125,14 @@ y = y[mask]
 
 hexbin_style = {
     'gridsize': opts.grid_size,
-    # hexbin shows bins with *less* than mincnt as blank
-    'mincnt': 0,
     'linewidths': 0.03
 }
+
+# In earlier versions mpl will try to take the max over bins with 0 triggers
+# and fail, unless we tell it to leave these blank by setting mincnt
+if matplotlib.__version__ < '3.8.1':
+    hexbin_style['mincnt'] = 0
+
 if opts.log_x:
     hexbin_style['xscale'] = 'log'
 if opts.log_y:
@@ -137,7 +158,7 @@ elif opts.z_var in ranking.sngls_ranking_function_dict:
     max_z = z.max() if opts.max_z is None else opts.max_z
     if max_z / min_z > 10:
         cb_style['ticks'] = LogLocator(subs=range(10))
-    hb = ax.hexbin(x, y, C=z, reduce_C_function=max, **hexbin_style)
+    hb = ax.hexbin(x, y, C=z, reduce_C_function=np.max, **hexbin_style)
     fig.colorbar(hb, **cb_style)
 else:
     raise RuntimeError('z_var = %s is not recognized!' % (opts.z_var))
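# One caveat on the hexbin hunk above: comparing matplotlib.__version__ as a
# string misorders releases such as '3.10.0' vs '3.8.1'; a more robust check,
# assuming the packaging module is available (matplotlib itself depends on it):
import matplotlib
from packaging.version import Version

hexbin_style = {'gridsize': 50, 'linewidths': 0.03}
if Version(matplotlib.__version__) < Version('3.8.1'):
    hexbin_style['mincnt'] = 0     # leave empty bins blank on old matplotlib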
diff --git a/bin/pycbc_inspiral b/bin/pycbc_inspiral
index dc71bd367d0..4d5e6e9a585 100644
--- a/bin/pycbc_inspiral
+++ b/bin/pycbc_inspiral
@@ -295,7 +295,7 @@ def template_triggers(t_num):
                                   out_vals['time_index'], opt.gps_start_time, opt.sample_rate)
 
         #print(idx, out_vals['time_index'])
-
+
         out_vals_all.append(copy.deepcopy(out_vals))
 
     #print(out_vals_all)
     return out_vals_all, tparam
@@ -358,10 +358,11 @@ with ctx:
     if opt.psdvar_segment is not None:
         logging.info("Calculating PSD variation")
         psd_var = pycbc.psd.calc_filt_psd_variation(gwstrain, opt.psdvar_segment,
-                      opt.psdvar_short_segment, opt.psdvar_long_segment,
+                      opt.psdvar_short_segment, opt.psdvar_long_segment,
                       opt.psdvar_psd_duration, opt.psdvar_psd_stride,
                       opt.psd_estimation, opt.psdvar_low_freq,
                       opt.psdvar_high_freq)
+
     if opt.enable_q_transform:
         logging.info("Performing q-transform on analysis segments")
         q_trans = qtransform.inspiral_qtransform_generator(segments)
@@ -453,16 +454,16 @@ with ctx:
 
     tsetup = time.time() - tstart
     tcheckpoint = time.time()
-
+
     tanalyze = list(range(tnum_start, len(bank)))
     n = opt.finalize_events_template_rate
     n = 1 if n is None else n
-    tchunks = [tanalyze[i:i + n] for i in range(0, len(tanalyze), n)]
-
+    tchunks = [tanalyze[i:i + n] for i in range(0, len(tanalyze), n)]
+
     mmap = map
     if opt.multiprocessing_nprocesses:
         mmap = Pool(opt.multiprocessing_nprocesses).map
-
+
     for tchunk in tchunks:
         data = list(mmap(template_triggers, tchunk))
 
@@ -472,11 +473,11 @@ with ctx:
             event_mgr.new_template(tmplt=tparam)
 
             for edata in out_vals_all:
-                event_mgr.add_template_events(names, [edata[n] for n in names])
-
+                event_mgr.add_template_events(names, [edata[n] for n in names])
+
             event_mgr.cluster_template_events("time_index", "snr", cluster_window)
             event_mgr.finalize_template_events()
-
+
         if opt.finalize_events_template_rate is not None:
             event_mgr.consolidate_events(opt, gwstrain=gwstrain)
 
@@ -488,7 +489,7 @@ with ctx:
         if opt.checkpoint_exit_maxtime and \
                 (time.time() - tstart > opt.checkpoint_exit_maxtime):
             event_mgr.save_state(max(tchunk), opt.output + '.checkpoint')
-            sys.exit(opt.checkpoint_exit_code)
+            sys.exit(opt.checkpoint_exit_code)
 
     event_mgr.consolidate_events(opt, gwstrain=gwstrain)
     event_mgr.finalize_events()
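# The analysis loop above batches template indices into fixed-size chunks and
# maps template_triggers over them, optionally through a process pool; the
# batching pattern in isolation (toy work function):
from multiprocessing import Pool

def analyze(t_num):                  # stand-in for template_triggers
    return t_num * t_num

if __name__ == '__main__':
    tanalyze = list(range(10))
    n = 4                            # cf. --finalize-events-template-rate
    tchunks = [tanalyze[i:i + n] for i in range(0, len(tanalyze), n)]
    with Pool(2) as pool:            # cf. --multiprocessing-nprocesses
        for tchunk in tchunks:
            print(list(pool.map(analyze, tchunk)))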
diff --git a/bin/pycbc_live b/bin/pycbc_live
index 9089199529b..646d4bebf40 100755
--- a/bin/pycbc_live
+++ b/bin/pycbc_live
@@ -42,6 +42,7 @@ from pycbc import mchirp_area
 from pycbc.detector import ppdets
 from pycbc.filter import resample
 from pycbc.psd import estimate
+from pycbc.psd import variation
 from pycbc.live import snr_optimizer
 
 # Use cached class-based FFTs in the resample and estimate module
@@ -996,6 +997,11 @@ parser.add_argument('--embright-massgap-max', type=float, default=5.0, metavar='
                     'HasMassGap probability.')
 parser.add_argument('--skymap-only-ifos', nargs='+',
                     help="Detectors that only contribute in sky localization")
+parser.add_argument('--psd-variation', action='store_true',
+                    help="Run the PSD variation code to produce PSD variation "
+                         "values for each single-detector trigger found by "
+                         "the search. Required when using a single-detector "
+                         "ranking statistic that includes PSD variation.")
 
 scheme.insert_processing_option_group(parser)
 LiveSingle.insert_args(parser)
@@ -1010,6 +1016,7 @@ args = parser.parse_args()
 
 scheme.verify_processing_options(args, parser)
 fft.verify_fft_options(args, parser)
+Coincer.verify_args(args, parser)
 
 ifos = set(args.channel_name.keys())
 analyze_singles = LiveSingle.verify_args(args, parser, ifos)
@@ -1162,6 +1169,11 @@ with ctx:
     last_bg_dump_time = int(data_end())
     psd_count = {ifo:0 for ifo in ifos}
 
+    # Create dicts to track whether the psd has been recalculated and to hold
+    # psd variation filters
+    psd_recalculated = {ifo: True for ifo in ifos}
+    psd_var_filts = {ifo: None for ifo in ifos}
+
     while data_end() < args.end_time:
         t1 = pycbc.gps_now()
         logging.info('Analyzing from %s', data_end())
@@ -1177,6 +1189,9 @@ with ctx:
                 )
                 if status and psd_count[ifo] == 0:
                     status = data_reader[ifo].recalculate_psd()
+                    # If the psd has been recalculated then we need a new
+                    # filter for psd variation calculation
+                    psd_recalculated[ifo] = True
                     psd_count[ifo] = args.psd_recompute_length - 1
                 elif not status:
                     psd_count[ifo] = 0
@@ -1242,6 +1257,28 @@ with ctx:
                     if len(results[ifo][key]):
                         results[ifo][key] = results[ifo][key][idx]
 
+        # Calculate and add the psd variation for the results
+        if args.psd_variation:
+
+            for ifo in results:
+                logging.info(f"Calculating PSD Variation Statistic for {ifo}")
+
+                # A new filter is needed if the PSD has been recalculated
+                if psd_recalculated[ifo] is True:
+                    psd_var_filts[ifo] = variation.live_create_filter(data_reader[ifo].psd,
+                                                                      args.psd_segment_length,
+                                                                      int(args.sample_rate))
+                    psd_recalculated[ifo] = False
+
+                psd_var_ts = variation.live_calc_psd_variation(data_reader[ifo].strain,
+                                                               psd_var_filts[ifo],
+                                                               args.increment)
+
+                psd_var_vals = variation.live_find_var_value(results[ifo],
+                                                             psd_var_ts)
+
+                results[ifo]['psd_var_val'] = psd_var_vals
+
         # Look for coincident triggers and do background estimation
         if args.enable_background_estimation:
             coinc_results = coinc_pool.broadcast(get_coinc, results)
diff --git a/bin/pycbc_multi_inspiral b/bin/pycbc_multi_inspiral
index 53f940529e8..cd30145c0b6 100755
--- a/bin/pycbc_multi_inspiral
+++ b/bin/pycbc_multi_inspiral
@@ -280,7 +280,7 @@ with ctx:
             'bank_chisq': None,
             'bank_chisq_dof': None,
             'cont_chisq': None,
-            'slide_id': int
+            'slide_id': None
         }
         ifo_names = sorted(ifo_out_vals.keys())
         network_out_types = {
@@ -303,13 +303,13 @@ with ctx:
             'nifo': None,
             'my_network_chisq': None,
             'reweighted_snr': None,
-            'slide_id': int
+            'slide_id': None
         }
         network_names = sorted(network_out_vals.keys())
         event_mgr = EventManagerCoherent(
             args, args.instruments, ifo_names,
             [ifo_out_types[n] for n in ifo_names], network_names,
-            [network_out_types[n] for n in network_names])
+            [network_out_types[n] for n in network_names], time_slides=time_slides)
         logging.info("Read in template bank")
         bank = waveform.FilterBank(
             args.bank_file, flen, delta_f, complex64, low_frequency_cutoff=flow,
@@ -473,10 +473,10 @@ with ctx:
             # If we have triggers above coinc threshold and more than 2
             # ifos, then calculate the coherent statistics
             if len(coinc_idx) != 0 and nifo > 2:
+                # Plus and cross polarization
+                fp = {ifo: ap[ifo][position_index][0] for ifo in args.instruments}
+                fc = {ifo: ap[ifo][position_index][1] for ifo in args.instruments}
                 if args.projection=='left+right':
-                    #Plus and cross polarization
-                    fp = {ifo: ap[ifo][position_index][0] for ifo in args.instruments}
-                    fc = {ifo: ap[ifo][position_index][1] for ifo in args.instruments}
                     # Left polarized coherent SNR
                     project_l = coh.get_projection_matrix(
                         fp, fc, sigma, projection='left')
args.instruments} # Left polarized coherent SNR project_l = coh.get_projection_matrix( fp, fc, sigma, projection='left') diff --git a/bin/pygrb/pycbc_grb_trig_cluster b/bin/pygrb/pycbc_grb_trig_cluster index 952cad43c2a..0bb8f16a1a7 100644 --- a/bin/pygrb/pycbc_grb_trig_cluster +++ b/bin/pygrb/pycbc_grb_trig_cluster @@ -72,7 +72,8 @@ def slice_hdf5(inputfile, outfile, include, verbose=False): ) for ifo in ifos } - nsets = sum(isinstance(item, h5py.Dataset) for + nsets = sum(isinstance(item, h5py.Dataset) + or isinstance(item, h5py.Group) for group in h5in.values() for item in group.values()) msg = "Slicing {} network events into new file".format(nevents) @@ -88,6 +89,10 @@ def slice_hdf5(inputfile, outfile, include, verbose=False): compression_opts=old.compression_opts, ) bar.update() + elif isinstance(old, h5py.Group): + if "search" in old.name: + h5in.copy(h5in[old.name], h5out, old.name) + bar.update() for ifo in ifos: idx = numpy.in1d(h5in[ifo]["event_id"][()], ifo_index[ifo]) for old in h5in[ifo].values(): @@ -99,6 +104,10 @@ def slice_hdf5(inputfile, outfile, include, verbose=False): compression_opts=old.compression_opts, ) bar.update() + elif isinstance(old, h5py.Group): + if "search" in old.name: + h5in.copy(h5in[old.name], h5out, old.name) + bar.update() bar.close() if verbose: print("Slice written to {}".format(outfile)) diff --git a/bin/pygrb/pycbc_grb_trig_combiner b/bin/pygrb/pycbc_grb_trig_combiner index edddc507627..84cdf0b4ca5 100644 --- a/bin/pygrb/pycbc_grb_trig_combiner +++ b/bin/pygrb/pycbc_grb_trig_combiner @@ -279,6 +279,11 @@ def bin_events(inputfile, bins, outdir, filetag, compression_opts=old.compression_opts, ) bar.update() + elif isinstance(old, h5py.Group): + # If search group, copy it all over + if "search" in old.name: + h5in.copy(h5in[old.name], h5out, old.name) + bar.update() for ifo in ifos: idx = numpy.in1d(h5in[ifo]["event_id"][()], ifo_index[ifo]) for old in h5in[ifo].values(): @@ -290,6 +295,10 @@ def bin_events(inputfile, bins, outdir, filetag, compression_opts=old.compression_opts, ) bar.update() + elif isinstance(old, h5py.Group): + if "search" in old.name: + h5in.copy(h5in[old.name], h5out, old.name) + bar.update() bar.close() if verbose: print("{} written to {}".format(bin_, outf)) @@ -492,9 +501,6 @@ vprint("-- Merging events") if args.short_slides and args.long_slides: raise NotImplementedError -elif args.short_slides and not args.long_slides: - raise NotImplementedError - else: outfilename = "{}-{}_ALL_TIMES-{}-{}.h5".format( args.ifo_tag, args.user_tag, start, end-start, diff --git a/bin/pygrb/pycbc_pygrb_grb_info_table b/bin/pygrb/pycbc_pygrb_grb_info_table index 2973ef5150e..9c88861a00e 100644 --- a/bin/pygrb/pycbc_pygrb_grb_info_table +++ b/bin/pygrb/pycbc_pygrb_grb_info_table @@ -74,10 +74,10 @@ data[0].append(utc_time) headers.append('Coordinated Universal Time') data[0].append(str(opts.ra)) -headers.append('R.A.') +headers.append('R.A. 
(deg)') data[0].append(str(opts.dec)) -headers.append('Dec') +headers.append('Dec (deg)') data[0].append(str(opts.sky_error)) headers.append('Sky Error') diff --git a/bin/pygrb/pycbc_pygrb_plot_coh_ifosnr b/bin/pygrb/pycbc_pygrb_plot_coh_ifosnr index 1af496ee01d..05a94681162 100644 --- a/bin/pygrb/pycbc_pygrb_plot_coh_ifosnr +++ b/bin/pygrb/pycbc_pygrb_plot_coh_ifosnr @@ -97,16 +97,16 @@ def load_data(input_file, vetoes, ifos, opts, injections=False): sigma_tot = numpy.zeros(num_trigs) # Get parameters necessary for antenna responses - longitude = numpy.degrees(trigs['network/longitude']) - latitude = numpy.degrees(trigs['network/latitude']) + ra = trigs['network/ra'][:] + dec = trigs['network/dec'][:] time = trigs['network/end_time_gc'][:] # Get antenna response based parameters for ifo in ifos: antenna = Detector(ifo) ifo_f_resp = \ - ppu.get_antenna_responses(antenna, longitude, - latitude, time) + ppu.get_antenna_responses(antenna, ra, + dec, time) # Get the average for f_resp_mean and calculate sigma_tot data['f_resp_mean'][ifo] = ifo_f_resp.mean() diff --git a/bin/pygrb/pycbc_pygrb_plot_injs_results b/bin/pygrb/pycbc_pygrb_plot_injs_results index d7c1fccf357..22915b945cd 100644 --- a/bin/pygrb/pycbc_pygrb_plot_injs_results +++ b/bin/pygrb/pycbc_pygrb_plot_injs_results @@ -18,7 +18,7 @@ """ -Plot found/missed injection properties for the triggered search (PyGRB).' +Plot found/missed injection properties for the triggered search (PyGRB). """ import h5py, logging, os.path, argparse, sys diff --git a/bin/pygrb/pycbc_pygrb_plot_skygrid b/bin/pygrb/pycbc_pygrb_plot_skygrid index 3c043dfede6..feb6fda4594 100644 --- a/bin/pygrb/pycbc_pygrb_plot_skygrid +++ b/bin/pygrb/pycbc_pygrb_plot_skygrid @@ -91,8 +91,8 @@ trig_data = load_data(trig_file, vetoes) # Generate sky grid plot # -xlabel = "Longitude (Degrees)" -ylabel = "Latitude (Degrees)" +xlabel = "RA (deg)" +ylabel = "Dec (deg)" if opts.verbose: sys.stdout.write("\nPlotting...\n") @@ -103,7 +103,8 @@ fig = plt.figure() ax = fig.gca() ax.set_xlabel(xlabel)#, fontsize=16) ax.set_ylabel(ylabel)#, fontsize=16) -ax.plot(trig_data['network/longitude'][:], trig_data['network/latitude'][:], +# Trigger RA/Dec data is stored in radians, so convert to degrees +ax.plot(numpy.rad2deg(trig_data['network/ra']), numpy.rad2deg(trig_data['network/dec']), 'ko', markerfacecolor='blue') # Wrap up save_fig_with_metadata(fig, outfile, cmd=' '.join(sys.argv), diff --git a/bin/pygrb/pycbc_pygrb_plot_stats_distribution b/bin/pygrb/pycbc_pygrb_plot_stats_distribution index e56dc6584d5..e36ce7ab1b1 100644 --- a/bin/pygrb/pycbc_pygrb_plot_stats_distribution +++ b/bin/pygrb/pycbc_pygrb_plot_stats_distribution @@ -34,10 +34,6 @@ import pycbc.version from pycbc import init_logging from pycbc.results import save_fig_with_metadata from pycbc.results import pygrb_postprocessing_utils as ppu -try: - from glue.ligolw import lsctables -except ImportError: - pass plt.switch_backend('Agg') rc("image", cmap="cividis_r") @@ -86,8 +82,8 @@ ifos, vetoes = ppu.extract_ifos_and_vetoes(trig_file, opts.veto_files, # Load triggers, time-slides, and segment dictionary logging.info("Loading triggers.") -trigs = ppu.load_xml_table(trig_file, lsctables.MultiInspiralTable.tableName) -logging.info("%d triggers loaded.", len(trigs)) +trigs = ppu.load_triggers(trig_file, vetoes) +logging.info("%d triggers loaded.", trigs['network/event_id'].len()) logging.info("Loading timeslides.") slide_dict = ppu.load_time_slides(trig_file) logging.info("Loading segments.") diff --git 
a/bin/pygrb/pycbc_pygrb_pp_workflow b/bin/pygrb/pycbc_pygrb_pp_workflow index 7e51112c303..71734fef27e 100644 --- a/bin/pygrb/pycbc_pygrb_pp_workflow +++ b/bin/pygrb/pycbc_pygrb_pp_workflow @@ -387,8 +387,8 @@ ini_file = _workflow.FileList([_workflow.File(wflow.ifos, '', layout.single_layout(base, ini_file) # Create versioning information -wf.make_versioning_page( - _workflow, +_workflow.make_versioning_page( + wflow, wflow.cp, rdir['workflow/version'], ) diff --git a/bin/workflows/pycbc_make_offline_search_workflow b/bin/workflows/pycbc_make_offline_search_workflow index 9b3a7da8678..14f0a9bcaa1 100755 --- a/bin/workflows/pycbc_make_offline_search_workflow +++ b/bin/workflows/pycbc_make_offline_search_workflow @@ -66,7 +66,7 @@ wf.add_workflow_settings_cli(parser) args = parser.parse_args() # By default, we do logging.info, each --verbose adds a level of verbosity -logging_level = args.verbose + 1 if args.verbose else logging.INFO +logging_level = args.verbose + 1 if args.verbose else 1 pycbc.init_logging(logging_level) container = wf.Workflow(args, args.workflow_name) @@ -188,7 +188,7 @@ insps = wf.merge_single_detector_hdf_files(workflow, hdfbank, tags=['full_data']) # setup sngl trigger distribution fitting jobs -# 'statfiles' is list of files used in calculating coinc statistic +# 'statfiles' is a list of files used in calculating the statistic # 'dqfiles' is the subset of files containing data quality information statfiles = [] dqfiles = [] @@ -458,7 +458,8 @@ for insp_file in full_insps: wf.setup_single_det_minifollowups\ (workflow, insp_file, hdfbank, insp_files_seg_file, data_analysed_name, trig_generated_name, 'daxes', currdir, - veto_file=censored_veto, veto_segment_name='closed_box', + statfiles=wf.FileList(statfiles), + fg_file=censored_veto, fg_name='closed_box', tags=insp_file.tags + [subsec]) ##################### COINC FULL_DATA plots ################################### diff --git a/docs/release.rst b/docs/release.rst index 130539953a7..eeeb74b0a94 100644 --- a/docs/release.rst +++ b/docs/release.rst @@ -31,6 +31,7 @@ To create a new PyCBC release: old production release series. Creating the release will trigger a Travis build that updates CVMFS, Docker, and PyPI with the release. +Please ensure that you check the outputs of these builds and urgently report any errors to the other maintainers; you will be emailed if a build fails. ------------------------------------------------ Backporting Bug Fixes to Previous Release Series diff --git a/examples/live/run.sh b/examples/live/run.sh index 2e5e81615d4..82b79db5a24 100755 --- a/examples/live/run.sh +++ b/examples/live/run.sh @@ -177,7 +177,7 @@ python -m mpi4py `which pycbc_live` \ --max-batch-size 16777216 \ --output-path output \ --day-hour-output-prefix \ ---sngl-ranking newsnr_sgveto \ +--sngl-ranking newsnr_sgveto_psdvar_threshold \ --ranking-statistic phasetd \ --statistic-files statHL.hdf statHV.hdf statLV.hdf \ --sgchisq-snr-threshold 4 \ @@ -204,6 +204,7 @@ python -m mpi4py `which pycbc_live` \ --single-duration-threshold 7 \ --single-reduced-chisq-threshold 2 \ --single-fit-file single_trigger_fits.hdf \ +--psd-variation \ --verbose # If you would like to use the pso optimizer, change --optimizer to pso diff --git a/pycbc/__init__.py b/pycbc/__init__.py index 53c35debfff..18276a6b630 100644 --- a/pycbc/__init__.py +++ b/pycbc/__init__.py @@ -78,8 +78,7 @@ def init_logging(verbose=False, format='%(asctime)s %(message)s'): What level to set the verbosity level to. 
Accepts either a boolean or an integer representing the level to set. If True/False will set to ``logging.INFO``/``logging.WARN``. For higher logging levels, pass - an integer representing the level to set (see the ``logging`` module - for details). Default is ``False`` (``logging.WARN``). + an integer representing the level to set (1 = INFO, 2 = DEBUG). format : str, optional The format to use for logging messages. """ @@ -96,15 +95,11 @@ def sig_handler(signum, frame): signal.signal(signal.SIGUSR1, sig_handler) - if not verbose: - initial_level = logging.WARN - elif int(verbose) == 1: - initial_level = logging.INFO - else: - initial_level = int(verbose) - + # See https://docs.python.org/3/library/logging.html#levels + # for log level definitions logger = logging.getLogger() - logger.setLevel(initial_level) + verbose_int = 0 if verbose is None else int(verbose) + logger.setLevel(logging.WARNING - verbose_int * 10) # Initial setting sh = logging.StreamHandler() logger.addHandler(sh) sh.setFormatter(LogFormatter(fmt=format)) diff --git a/pycbc/conversions.py b/pycbc/conversions.py index 167127eb8f6..20c78f88949 100644 --- a/pycbc/conversions.py +++ b/pycbc/conversions.py @@ -562,7 +562,7 @@ def remnant_mass_from_mass1_mass2_spherical_spin_eos( # If a maximum NS mass is not provided, accept all values and # let the EOS handle this (in ns.initialize_eos) if ns_bh_mass_boundary is None: - mask = numpy.ones(ensurearray(mass2).size[0], dtype=bool) + mask = numpy.ones(ensurearray(mass2)[0].size, dtype=bool) # Otherwise perform the calculation only for small enough NS masses... else: mask = mass2 <= ns_bh_mass_boundary diff --git a/pycbc/coordinates/__init__.py b/pycbc/coordinates/__init__.py new file mode 100644 index 00000000000..e1d7c81d90d --- /dev/null +++ b/pycbc/coordinates/__init__.py @@ -0,0 +1,37 @@ +# Copyright (C) 2023 Shichao Wu, Alex Nitz +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +""" +This module provides functions for base coordinate transformations, and +more advanced transformations between ground-based detectors and space-borne +detectors. 
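The new ``init_logging`` body above maps the integer verbosity count directly onto the standard-library level scale. A minimal sketch of that mapping, standard library only and not part of the patch:

    import logging

    # WARNING - 10 * verbose reproduces the documented behaviour:
    # 0 -> WARNING, 1 -> INFO, 2 -> DEBUG
    for verbose in (0, 1, 2):
        print(verbose, logging.getLevelName(logging.WARNING - verbose * 10))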
+""" + +from pycbc.coordinates.base import * +from pycbc.coordinates.space import * + + +__all__ = ['cartesian_to_spherical_rho', 'cartesian_to_spherical_azimuthal', + 'cartesian_to_spherical_polar', 'cartesian_to_spherical', + 'spherical_to_cartesian', + 'TIME_OFFSET_20_DEGREES', + 'localization_to_propagation_vector', + 'propagation_vector_to_localization', 'polarization_newframe', + 't_lisa_from_ssb', 't_ssb_from_t_lisa', + 'ssb_to_lisa', 'lisa_to_ssb', + 'rotation_matrix_ssb_to_lisa', 'rotation_matrix_ssb_to_geo', + 'lisa_position_ssb', 'earth_position_ssb', + 't_geo_from_ssb', 't_ssb_from_t_geo', 'ssb_to_geo', 'geo_to_ssb', + 'lisa_to_geo', 'geo_to_lisa', + ] diff --git a/pycbc/coordinates.py b/pycbc/coordinates/base.py similarity index 92% rename from pycbc/coordinates.py rename to pycbc/coordinates/base.py index dab734b1aa5..48dffe8573b 100644 --- a/pycbc/coordinates.py +++ b/pycbc/coordinates/base.py @@ -14,7 +14,18 @@ # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -""" Coordinate transformations. + + +# +# ============================================================================= +# +# Preamble +# +# ============================================================================= +# +""" +Base coordinate transformations, this module provides transformations between +cartesian and spherical coordinates. """ import numpy @@ -60,6 +71,7 @@ def cartesian_to_spherical_azimuthal(x, y): phi = numpy.arctan2(y, x) return phi % (2 * numpy.pi) + def cartesian_to_spherical_polar(x, y, z): """ Calculates the polar angle in spherical coordinates from Cartesian coordinates. The polar angle is in [0,pi]. @@ -141,7 +153,8 @@ def spherical_to_cartesian(rho, phi, theta): z = rho * numpy.cos(theta) return x, y, z + __all__ = ['cartesian_to_spherical_rho', 'cartesian_to_spherical_azimuthal', 'cartesian_to_spherical_polar', 'cartesian_to_spherical', 'spherical_to_cartesian', - ] + ] diff --git a/pycbc/coordinates/space.py b/pycbc/coordinates/space.py new file mode 100644 index 00000000000..4a027746b0b --- /dev/null +++ b/pycbc/coordinates/space.py @@ -0,0 +1,949 @@ +# Copyright (C) 2023 Shichao Wu, Alex Nitz +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + +# +# ============================================================================= +# +# Preamble +# +# ============================================================================= +# +""" +This module provides coordinate transformations related to space-borne +detectors, such as coordinate transformations between space-borne detectors +and ground-based detectors. Note that current LISA orbit used in this module +is a circular orbit, need to be replaced by a more realistic and general orbit +model in the near future. 
+""" + +import numpy as np +from scipy.spatial.transform import Rotation +from scipy.optimize import fsolve +from astropy import units +from astropy.constants import c, au +from astropy.time import Time +from astropy.coordinates import BarycentricMeanEcliptic, PrecessedGeocentric +from astropy.coordinates import get_body_barycentric +from astropy.coordinates import SkyCoord +from astropy.coordinates.builtin_frames import ecliptic_transforms + +# This constant makes sure LISA is behind the Earth by 19-23 degrees. +# Making this a stand-alone constant will also make it callable by +# the waveform plugin and PE config file. In the unit of 's'. +TIME_OFFSET_20_DEGREES = 7365189.431698299 + +# "rotation_matrix_ssb_to_lisa" and "lisa_position_ssb" should be +# more general for other detectors in the near future. + + +def rotation_matrix_ssb_to_lisa(alpha): + """ The rotation matrix (of frame basis) from SSB frame to LISA frame. + This function assumes the angle between LISA plane and the ecliptic + is 60 degrees, and the period of LISA's self-rotation and orbital + revolution is both one year. + + Parameters + ---------- + alpha : float + The angular displacement of LISA in SSB frame. + In the unit of 'radian'. + + Returns + ------- + r_total : numpy.array + A 3x3 rotation matrix from SSB frame to LISA frame. + """ + r = Rotation.from_rotvec([ + [0, 0, alpha], + [0, -np.pi/3, 0], + [0, 0, -alpha] + ]).as_matrix() + r_total = np.array(r[0]) @ np.array(r[1]) @ np.array(r[2]) + + return r_total + + +def lisa_position_ssb(t_lisa, t0=TIME_OFFSET_20_DEGREES): + """ Calculating the position vector and angular displacement of LISA + in the SSB frame, at a given time. This function assumes LISA's barycenter + is orbiting around a circular orbit within the ecliptic behind the Earth. + The period of it is one year. + + Parameters + ---------- + t_lisa : float + The time when a GW signal arrives at the origin of LISA frame, + or any other time you want. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + (p, alpha) : tuple + p : numpy.array + The position vector of LISA in the SSB frame. In the unit of 'm'. + alpha : float + The angular displacement of LISA in the SSB frame. + In the unit of 'radian'. + """ + OMEGA_0 = 1.99098659277e-7 + R_ORBIT = au.value + alpha = np.mod(OMEGA_0 * (t_lisa + t0), 2*np.pi) + p = np.array([[R_ORBIT * np.cos(alpha)], + [R_ORBIT * np.sin(alpha)], + [0]], dtype=object) + return (p, alpha) + + +def localization_to_propagation_vector(longitude, latitude, + use_astropy=True, frame=None): + """ Converting the sky localization to the corresponding + propagation unit vector of a GW signal. + + Parameters + ---------- + longitude : float + The longitude, in the unit of 'radian'. + latitude : float + The latitude, in the unit of 'radian'. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + frame : astropy.coordinates + The frame from astropy.coordinates if use_astropy is True, + the default is None. + + Returns + ------- + [[x], [y], [z]] : numpy.array + The propagation unit vector of that GW signal. 
+ """ + if use_astropy: + x = -frame.cartesian.x.value + y = -frame.cartesian.y.value + z = -frame.cartesian.z.value + else: + x = -np.cos(latitude) * np.cos(longitude) + y = -np.cos(latitude) * np.sin(longitude) + z = -np.sin(latitude) + v = np.array([[x], [y], [z]]) + + return v / np.linalg.norm(v) + + +def propagation_vector_to_localization(k, use_astropy=True, frame=None): + """ Converting the propagation unit vector to the corresponding + sky localization of a GW signal. + + Parameters + ---------- + k : numpy.array + The propagation unit vector of a GW signal. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + frame : astropy.coordinates + The frame from astropy.coordinates if use_astropy is True, + the default is None. + + Returns + ------- + (longitude, latitude) : tuple + The sky localization of that GW signal. + """ + if use_astropy: + try: + longitude = frame.lon.rad + latitude = frame.lat.rad + except AttributeError: + longitude = frame.ra.rad + latitude = frame.dec.rad + else: + # latitude already within [-pi/2, pi/2] + latitude = np.float64(np.arcsin(-k[2])) + longitude = np.float64(np.arctan2(-k[1]/np.cos(latitude), + -k[0]/np.cos(latitude))) + # longitude should be within [0, 2*pi) + longitude = np.mod(longitude, 2*np.pi) + + return (longitude, latitude) + + +def polarization_newframe(polarization, k, rotation_matrix, use_astropy=True, + old_frame=None, new_frame=None): + """ Converting a polarization angle from a frame to a new frame + by using the rotation matrix method. + + Parameters + ---------- + polarization : float + The polarization angle in the old frame, in the unit of 'radian'. + k : numpy.array + The propagation unit vector of a GW signal in the old frame. + rotation_matrix : numpy.array + The rotation matrix (of frame basis) from the old frame to + the new frame. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + old_frame : astropy.coordinates + The frame from astropy.coordinates if use_astropy is True, + the default is None. + new_frame : astropy.coordinates + The frame from astropy.coordinates if use_astropy is True, + the default is None. The new frame for the new polarization + angle. + + Returns + ------- + polarization_new_frame : float + The polarization angle in the new frame of that GW signal. 
+ """ + longitude, _ = propagation_vector_to_localization( + k, use_astropy, old_frame) + u = np.array([[np.sin(longitude)], [-np.cos(longitude)], [0]]) + rotation_vector = polarization * k + rotation_polarization = Rotation.from_rotvec(rotation_vector.T[0]) + p = rotation_polarization.apply(u.T[0]).reshape(3, 1) + p_newframe = rotation_matrix.T @ p + k_newframe = rotation_matrix.T @ k + longitude_newframe, latitude_newframe = \ + propagation_vector_to_localization(k_newframe, use_astropy, new_frame) + u_newframe = np.array([[np.sin(longitude_newframe)], + [-np.cos(longitude_newframe)], [0]]) + v_newframe = np.array([ + [-np.sin(latitude_newframe) * np.cos(longitude_newframe)], + [-np.sin(latitude_newframe) * np.sin(longitude_newframe)], + [np.cos(latitude_newframe)]]) + p_dot_u_newframe = np.vdot(p_newframe, u_newframe) + p_dot_v_newframe = np.vdot(p_newframe, v_newframe) + polarization_new_frame = np.arctan2(p_dot_v_newframe, p_dot_u_newframe) + polarization_new_frame = np.mod(polarization_new_frame, 2*np.pi) + # avoid rounding error + if polarization_new_frame == 2*np.pi: + polarization_new_frame = 0 + + return polarization_new_frame + + +def t_lisa_from_ssb(t_ssb, longitude_ssb, latitude_ssb, + t0=TIME_OFFSET_20_DEGREES): + """ Calculating the time when a GW signal arrives at the barycenter + of LISA, by using the time and sky localization in SSB frame. + + Parameters + ---------- + t_ssb : float + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + t_lisa : float + The time when a GW signal arrives at the origin of LISA frame. + """ + k = localization_to_propagation_vector( + longitude_ssb, latitude_ssb, use_astropy=False) + + def equation(t_lisa): + # LISA is moving, when GW arrives at LISA center, + # time is t_lisa, not t_ssb. + p = lisa_position_ssb(t_lisa, t0)[0] + return t_lisa - t_ssb - np.vdot(k, p) / c.value + + return fsolve(equation, t_ssb)[0] + + +def t_ssb_from_t_lisa(t_lisa, longitude_ssb, latitude_ssb, + t0=TIME_OFFSET_20_DEGREES): + """ Calculating the time when a GW signal arrives at the barycenter + of SSB, by using the time in LISA frame and sky localization in SSB frame. + + Parameters + ---------- + t_lisa : float + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_ssb : float + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + t_ssb : float + The time when a GW signal arrives at the origin of SSB frame. + """ + k = localization_to_propagation_vector( + longitude_ssb, latitude_ssb, use_astropy=False) + # LISA is moving, when GW arrives at LISA center, + # time is t_lisa, not t_ssb. 
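+    # Editorial note (not part of the patch): the correction term
+    # np.vdot(k, p) / c.value that fsolve balances in these solvers is
+    # bounded by the light travel time across LISA's orbital radius of
+    # one AU, so the root stays within roughly 500 s of the input time:
+    #
+    #     from astropy.constants import c, au
+    #     print(au.value / c.value)    # ~499 s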
+ p = lisa_position_ssb(t_lisa, t0)[0] + + def equation(t_ssb): + return t_lisa - t_ssb - np.vdot(k, p) / c.value + + return fsolve(equation, t_lisa)[0] + + +def ssb_to_lisa(t_ssb, longitude_ssb, latitude_ssb, polarization_ssb, + t0=TIME_OFFSET_20_DEGREES): + """ Converting the arrival time, the sky localization, and the polarization + from the SSB frame to the LISA frame. + + Parameters + ---------- + t_ssb : float or numpy.array + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float or numpy.array + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float or numpy.array + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + polarization_ssb : float or numpy.array + The polarization angle of a GW signal in SSB frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + (t_lisa, longitude_lisa, latitude_lisa, polarization_lisa) : tuple + t_lisa : float or numpy.array + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_lisa : float or numpy.array + The longitude of a GW signal in LISA frame, in the unit of 'radian'. + latitude_lisa : float or numpy.array + The latitude of a GW signal in LISA frame, in the unit of 'radian'. + polarization_lisa : float or numpy.array + The polarization angle of a GW signal in LISA frame. + In the unit of 'radian'. + """ + if not isinstance(t_ssb, np.ndarray): + t_ssb = np.array([t_ssb]) + if not isinstance(longitude_ssb, np.ndarray): + longitude_ssb = np.array([longitude_ssb]) + if not isinstance(latitude_ssb, np.ndarray): + latitude_ssb = np.array([latitude_ssb]) + if not isinstance(polarization_ssb, np.ndarray): + polarization_ssb = np.array([polarization_ssb]) + num = len(t_ssb) + t_lisa, longitude_lisa = np.zeros(num), np.zeros(num) + latitude_lisa, polarization_lisa = np.zeros(num), np.zeros(num) + + for i in range(num): + if longitude_ssb[i] < 0 or longitude_ssb[i] >= 2*np.pi: + raise ValueError("Longitude should be within [0, 2*pi).") + if latitude_ssb[i] < -np.pi/2 or latitude_ssb[i] > np.pi/2: + raise ValueError("Latitude should be within [-pi/2, pi/2].") + if polarization_ssb[i] < 0 or polarization_ssb[i] >= 2*np.pi: + raise ValueError("Polarization angle should be within [0, 2*pi).") + t_lisa[i] = t_lisa_from_ssb(t_ssb[i], longitude_ssb[i], + latitude_ssb[i], t0) + k_ssb = localization_to_propagation_vector( + longitude_ssb[i], latitude_ssb[i], use_astropy=False) + # Although t_lisa above was calculated using the LISA position + # vector corrected by t0, it corresponds to the true t_ssb, not + # t_ssb+t0, so we need to include t0 again to correct the LISA position. 
+ alpha = lisa_position_ssb(t_lisa[i], t0)[1] + rotation_matrix_lisa = rotation_matrix_ssb_to_lisa(alpha) + k_lisa = rotation_matrix_lisa.T @ k_ssb + longitude_lisa[i], latitude_lisa[i] = \ + propagation_vector_to_localization(k_lisa, use_astropy=False) + polarization_lisa[i] = polarization_newframe( + polarization_ssb[i], k_ssb, rotation_matrix_lisa, + use_astropy=False) + + if num == 1: + params_lisa = (t_lisa[0], longitude_lisa[0], + latitude_lisa[0], polarization_lisa[0]) + else: + params_lisa = (t_lisa, longitude_lisa, + latitude_lisa, polarization_lisa) + + return params_lisa + + +def lisa_to_ssb(t_lisa, longitude_lisa, latitude_lisa, polarization_lisa, + t0=TIME_OFFSET_20_DEGREES): + """ Converting the arrival time, the sky localization, and the polarization + from the LISA frame to the SSB frame. + + Parameters + ---------- + t_lisa : float or numpy.array + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_lisa : float or numpy.array + The longitude of a GW signal in LISA frame, in the unit of 'radian'. + latitude_lisa : float or numpy.array + The latitude of a GW signal in LISA frame, in the unit of 'radian'. + polarization_lisa : float or numpy.array + The polarization angle of a GW signal in LISA frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + + Returns + ------- + (t_ssb, longitude_ssb, latitude_ssb, polarization_ssb) : tuple + t_ssb : float or numpy.array + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float or numpy.array + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float or numpy.array + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + polarization_ssb : float or numpy.array + The polarization angle of a GW signal in SSB frame. + In the unit of 'radian'. 
+ """ + if not isinstance(t_lisa, np.ndarray): + t_lisa = np.array([t_lisa]) + if not isinstance(longitude_lisa, np.ndarray): + longitude_lisa = np.array([longitude_lisa]) + if not isinstance(latitude_lisa, np.ndarray): + latitude_lisa = np.array([latitude_lisa]) + if not isinstance(polarization_lisa, np.ndarray): + polarization_lisa = np.array([polarization_lisa]) + num = len(t_lisa) + t_ssb, longitude_ssb = np.zeros(num), np.zeros(num) + latitude_ssb, polarization_ssb = np.zeros(num), np.zeros(num) + + for i in range(num): + if longitude_lisa[i] < 0 or longitude_lisa[i] >= 2*np.pi: + raise ValueError("Longitude should be within [0, 2*pi).") + if latitude_lisa[i] < -np.pi/2 or latitude_lisa[i] > np.pi/2: + raise ValueError("Latitude should be within [-pi/2, pi/2].") + if polarization_lisa[i] < 0 or polarization_lisa[i] >= 2*np.pi: + raise ValueError("Polarization angle should be within [0, 2*pi).") + k_lisa = localization_to_propagation_vector( + longitude_lisa[i], latitude_lisa[i], use_astropy=False) + alpha = lisa_position_ssb(t_lisa[i], t0)[1] + rotation_matrix_lisa = rotation_matrix_ssb_to_lisa(alpha) + k_ssb = rotation_matrix_lisa @ k_lisa + longitude_ssb[i], latitude_ssb[i] = \ + propagation_vector_to_localization(k_ssb, use_astropy=False) + t_ssb[i] = t_ssb_from_t_lisa(t_lisa[i], longitude_ssb[i], + latitude_ssb[i], t0) + polarization_ssb[i] = polarization_newframe( + polarization_lisa[i], k_lisa, rotation_matrix_lisa.T, + use_astropy=False) + + if num == 1: + params_ssb = (t_ssb[0], longitude_ssb[0], + latitude_ssb[0], polarization_ssb[0]) + else: + params_ssb = (t_ssb, longitude_ssb, + latitude_ssb, polarization_ssb) + + return params_ssb + + +def rotation_matrix_ssb_to_geo(epsilon=np.deg2rad(23.439281)): + """ The rotation matrix (of frame basis) from SSB frame to + geocentric frame. + + Parameters + ---------- + epsilon : float + The Earth's axial tilt (obliquity), in the unit of 'radian'. + + Returns + ------- + r : numpy.array + A 3x3 rotation matrix from SSB frame to geocentric frame. + """ + r = Rotation.from_rotvec([ + [-epsilon, 0, 0] + ]).as_matrix() + + return np.array(r[0]) + + +def earth_position_ssb(t_geo): + """ Calculating the position vector and angular displacement of the Earth + in the SSB frame, at a given time, by using Astropy. + + Parameters + ---------- + t_geo : float + The time when a GW signal arrives at the origin of geocentric frame, + or any other time you want. + + Returns + ------- + (p, alpha) : tuple + p : numpy.array + The position vector of the Earth in the SSB frame. In the unit of 'm'. + alpha : float + The angular displacement of the Earth in the SSB frame. + In the unit of 'radian'. + """ + t = Time(t_geo, format='gps') + pos = get_body_barycentric('earth', t) + # BarycentricMeanEcliptic doesn't have obstime attribute, + # it's a good inertial frame, but ICRS is not. + icrs_coord = SkyCoord(pos, frame='icrs', obstime=t) + bme_coord = icrs_coord.transform_to( + BarycentricMeanEcliptic(equinox='J2000')) + x = bme_coord.cartesian.x.to(units.m).value + y = bme_coord.cartesian.y.to(units.m).value + z = bme_coord.cartesian.z.to(units.m).value + p = np.array([[x], [y], [z]]) + alpha = bme_coord.lon.rad + + return (p, alpha) + + +def t_geo_from_ssb(t_ssb, longitude_ssb, latitude_ssb, + use_astropy=True, frame=None): + """ Calculating the time when a GW signal arrives at the barycenter + of the Earth, by using the time and sky localization in SSB frame. + + Parameters + ---------- + t_ssb : float + The time when a GW signal arrives at the origin of SSB frame. 
+ In the unit of 's'. + longitude_ssb : float + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + + Returns + ------- + t_geo : float + The time when a GW signal arrives at the origin of geocentric frame. + """ + k = localization_to_propagation_vector( + longitude_ssb, latitude_ssb, use_astropy, frame) + + def equation(t_geo): + # Earth is moving, when GW arrives at Earth center, + # time is t_geo, not t_ssb. + p = earth_position_ssb(t_geo)[0] + return t_geo - t_ssb - np.vdot(k, p) / c.value + + return fsolve(equation, t_ssb)[0] + + +def t_ssb_from_t_geo(t_geo, longitude_ssb, latitude_ssb, + use_astropy=True, frame=None): + """ Calculating the time when a GW signal arrives at the barycenter + of SSB, by using the time in geocentric frame and sky localization + in SSB frame. + + Parameters + ---------- + t_geo : float + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_ssb : float + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + + Returns + ------- + t_ssb : float + The time when a GW signal arrives at the origin of SSB frame. + """ + k = localization_to_propagation_vector( + longitude_ssb, latitude_ssb, use_astropy, frame) + # Earth is moving, when GW arrives at Earth center, + # time is t_geo, not t_ssb. + p = earth_position_ssb(t_geo)[0] + + def equation(t_ssb): + return t_geo - t_ssb - np.vdot(k, p) / c.value + + return fsolve(equation, t_geo)[0] + + +def ssb_to_geo(t_ssb, longitude_ssb, latitude_ssb, polarization_ssb, + use_astropy=True): + """ Converting the arrival time, the sky localization, and the polarization + from the SSB frame to the geocentric frame. + + Parameters + ---------- + t_ssb : float or numpy.array + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float or numpy.array + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float or numpy.array + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + polarization_ssb : float or numpy.array + The polarization angle of a GW signal in SSB frame. + In the unit of 'radian'. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + + Returns + ------- + (t_geo, longitude_geo, latitude_geo, polarization_geo) : tuple + t_geo : float or numpy.array + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_geo : float or numpy.array + The longitude of a GW signal in geocentric frame. + In the unit of 'radian'. + latitude_geo : float or numpy.array + The latitude of a GW signal in geocentric frame. + In the unit of 'radian'. + polarization_geo : float or numpy.array + The polarization angle of a GW signal in geocentric frame. + In the unit of 'radian'. 
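As with the LISA transforms, ``ssb_to_geo`` and ``geo_to_ssb`` (defined below) are mutual inverses; a usage sketch with hypothetical values:

    from pycbc.coordinates import ssb_to_geo, geo_to_ssb

    t_geo, lon_geo, lat_geo, pol_geo = ssb_to_geo(
        1126259462.4, 1.0, 0.3, 0.0, use_astropy=True)
    # Transforming back should recover the original SSB parameters
    params_ssb = geo_to_ssb(t_geo, lon_geo, lat_geo, pol_geo,
                            use_astropy=True)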
+ """ + if not isinstance(t_ssb, np.ndarray): + t_ssb = np.array([t_ssb]) + if not isinstance(longitude_ssb, np.ndarray): + longitude_ssb = np.array([longitude_ssb]) + if not isinstance(latitude_ssb, np.ndarray): + latitude_ssb = np.array([latitude_ssb]) + if not isinstance(polarization_ssb, np.ndarray): + polarization_ssb = np.array([polarization_ssb]) + num = len(t_ssb) + t_geo = np.full(num, np.nan) + longitude_geo = np.full(num, np.nan) + latitude_geo = np.full(num, np.nan) + polarization_geo = np.full(num, np.nan) + + for i in range(num): + if longitude_ssb[i] < 0 or longitude_ssb[i] >= 2*np.pi: + raise ValueError("Longitude should be within [0, 2*pi).") + if latitude_ssb[i] < -np.pi/2 or latitude_ssb[i] > np.pi/2: + raise ValueError("Latitude should be within [-pi/2, pi/2].") + if polarization_ssb[i] < 0 or polarization_ssb[i] >= 2*np.pi: + raise ValueError("Polarization angle should be within [0, 2*pi).") + + if use_astropy: + # BarycentricMeanEcliptic doesn't have obstime attribute, + # it's a good inertial frame, but PrecessedGeocentric is not. + bme_coord = BarycentricMeanEcliptic( + lon=longitude_ssb[i]*units.radian, + lat=latitude_ssb[i]*units.radian, + equinox='J2000') + t_geo[i] = t_geo_from_ssb(t_ssb[i], longitude_ssb[i], + latitude_ssb[i], use_astropy, bme_coord) + geo_sky = bme_coord.transform_to(PrecessedGeocentric( + equinox='J2000', obstime=Time(t_geo[i], format='gps'))) + longitude_geo[i] = geo_sky.ra.rad + latitude_geo[i] = geo_sky.dec.rad + k_geo = localization_to_propagation_vector( + longitude_geo[i], latitude_geo[i], + use_astropy, geo_sky) + k_ssb = localization_to_propagation_vector( + None, None, use_astropy, bme_coord) + rotation_matrix_geo = \ + ecliptic_transforms.icrs_to_baryecliptic( + from_coo=None, + to_frame=BarycentricMeanEcliptic(equinox='J2000')) + polarization_geo[i] = polarization_newframe( + polarization_ssb[i], k_ssb, + rotation_matrix_geo, use_astropy, + old_frame=bme_coord, + new_frame=geo_sky) + else: + t_geo[i] = t_geo_from_ssb(t_ssb[i], longitude_ssb[i], + latitude_ssb[i], use_astropy) + rotation_matrix_geo = rotation_matrix_ssb_to_geo() + k_ssb = localization_to_propagation_vector( + longitude_ssb[i], latitude_ssb[i], + use_astropy) + k_geo = rotation_matrix_geo.T @ k_ssb + longitude_geo[i], latitude_geo[i] = \ + propagation_vector_to_localization(k_geo, use_astropy) + polarization_geo[i] = polarization_newframe( + polarization_ssb[i], k_ssb, + rotation_matrix_geo, use_astropy) + + # As mentioned in the LDC manual, the p,q vectors are opposite between + # LDC and LAL conventions, see Sec 4.1.5 in . + polarization_geo[i] = np.mod(polarization_geo[i]+np.pi, 2*np.pi) + + if num == 1: + params_geo = (t_geo[0], longitude_geo[0], + latitude_geo[0], polarization_geo[0]) + else: + params_geo = (t_geo, longitude_geo, + latitude_geo, polarization_geo) + + return params_geo + + +def geo_to_ssb(t_geo, longitude_geo, latitude_geo, polarization_geo, + use_astropy=True): + """ Converting the arrival time, the sky localization, and the polarization + from the geocentric frame to the SSB frame. + + Parameters + ---------- + t_geo : float or numpy.array + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_geo : float or numpy.array + The longitude of a GW signal in geocentric frame. + In the unit of 'radian'. + latitude_geo : float or numpy.array + The latitude of a GW signal in geocentric frame. + In the unit of 'radian'. 
+ polarization_geo : float or numpy.array + The polarization angle of a GW signal in geocentric frame. + In the unit of 'radian'. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + + Returns + ------- + (t_ssb, longitude_ssb, latitude_ssb, polarization_ssb) : tuple + t_ssb : float or numpy.array + The time when a GW signal arrives at the origin of SSB frame. + In the unit of 's'. + longitude_ssb : float or numpy.array + The ecliptic longitude of a GW signal in SSB frame. + In the unit of 'radian'. + latitude_ssb : float or numpy.array + The ecliptic latitude of a GW signal in SSB frame. + In the unit of 'radian'. + polarization_ssb : float or numpy.array + The polarization angle of a GW signal in SSB frame. + In the unit of 'radian'. + """ + if not isinstance(t_geo, np.ndarray): + t_geo = np.array([t_geo]) + if not isinstance(longitude_geo, np.ndarray): + longitude_geo = np.array([longitude_geo]) + if not isinstance(latitude_geo, np.ndarray): + latitude_geo = np.array([latitude_geo]) + if not isinstance(polarization_geo, np.ndarray): + polarization_geo = np.array([polarization_geo]) + num = len(t_geo) + t_ssb = np.full(num, np.nan) + longitude_ssb = np.full(num, np.nan) + latitude_ssb = np.full(num, np.nan) + polarization_ssb = np.full(num, np.nan) + + for i in range(num): + if longitude_geo[i] < 0 or longitude_geo[i] >= 2*np.pi: + raise ValueError("Longitude should be within [0, 2*pi).") + if latitude_geo[i] < -np.pi/2 or latitude_geo[i] > np.pi/2: + raise ValueError("Latitude should be within [-pi/2, pi/2].") + if polarization_geo[i] < 0 or polarization_geo[i] >= 2*np.pi: + raise ValueError("Polarization angle should be within [0, 2*pi).") + + if use_astropy: + # BarycentricMeanEcliptic doesn't have obstime attribute, + # it's a good inertial frame, but PrecessedGeocentric is not. + geo_coord = PrecessedGeocentric( + ra=longitude_geo[i]*units.radian, + dec=latitude_geo[i]*units.radian, + equinox='J2000', + obstime=Time(t_geo[i], format='gps')) + ssb_sky = geo_coord.transform_to( + BarycentricMeanEcliptic(equinox='J2000')) + longitude_ssb[i] = ssb_sky.lon.rad + latitude_ssb[i] = ssb_sky.lat.rad + k_ssb = localization_to_propagation_vector( + longitude_ssb[i], latitude_ssb[i], + use_astropy, ssb_sky) + k_geo = localization_to_propagation_vector( + None, None, use_astropy, geo_coord) + rotation_matrix_geo = \ + ecliptic_transforms.icrs_to_baryecliptic( + from_coo=None, + to_frame=BarycentricMeanEcliptic(equinox='J2000')) + t_ssb[i] = t_ssb_from_t_geo(t_geo[i], longitude_ssb[i], + latitude_ssb[i], use_astropy, + ssb_sky) + polarization_ssb[i] = polarization_newframe( + polarization_geo[i], k_geo, + rotation_matrix_geo.T, + use_astropy, + old_frame=geo_coord, + new_frame=ssb_sky) + else: + rotation_matrix_geo = rotation_matrix_ssb_to_geo() + k_geo = localization_to_propagation_vector( + longitude_geo[i], latitude_geo[i], use_astropy) + k_ssb = rotation_matrix_geo @ k_geo + longitude_ssb[i], latitude_ssb[i] = \ + propagation_vector_to_localization(k_ssb, use_astropy) + t_ssb[i] = t_ssb_from_t_geo(t_geo[i], longitude_ssb[i], + latitude_ssb[i], use_astropy) + polarization_ssb[i] = polarization_newframe( + polarization_geo[i], k_geo, + rotation_matrix_geo.T, use_astropy) + + # As mentioned in the LDC manual, the p,q vectors are opposite between + # LDC and LAL conventions, see Sec 4.1.5 in . 
+ polarization_ssb[i] = np.mod(polarization_ssb[i]-np.pi, 2*np.pi) + + if num == 1: + params_ssb = (t_ssb[0], longitude_ssb[0], + latitude_ssb[0], polarization_ssb[0]) + else: + params_ssb = (t_ssb, longitude_ssb, + latitude_ssb, polarization_ssb) + + return params_ssb + + +def lisa_to_geo(t_lisa, longitude_lisa, latitude_lisa, polarization_lisa, + t0=TIME_OFFSET_20_DEGREES, use_astropy=True): + """ Converting the arrival time, the sky localization, and the polarization + from the LISA frame to the geocentric frame. + + Parameters + ---------- + t_lisa : float or numpy.array + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_lisa : float or numpy.array + The longitude of a GW signal in LISA frame, in the unit of 'radian'. + latitude_lisa : float or numpy.array + The latitude of a GW signal in LISA frame, in the unit of 'radian'. + polarization_lisa : float or numpy.array + The polarization angle of a GW signal in LISA frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + + Returns + ------- + (t_geo, longitude_geo, latitude_geo, polarization_geo) : tuple + t_geo : float or numpy.array + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_geo : float or numpy.array + The longitude of a GW signal in geocentric frame. + In the unit of 'radian'. + latitude_geo : float or numpy.array + The latitude of a GW signal in geocentric frame. + In the unit of 'radian'. + polarization_geo : float or numpy.array + The polarization angle of a GW signal in geocentric frame. + In the unit of 'radian'. + """ + t_ssb, longitude_ssb, latitude_ssb, polarization_ssb = lisa_to_ssb( + t_lisa, longitude_lisa, latitude_lisa, polarization_lisa, t0) + t_geo, longitude_geo, latitude_geo, polarization_geo = ssb_to_geo( + t_ssb, longitude_ssb, latitude_ssb, polarization_ssb, use_astropy) + + return (t_geo, longitude_geo, latitude_geo, polarization_geo) + + +def geo_to_lisa(t_geo, longitude_geo, latitude_geo, polarization_geo, + t0=TIME_OFFSET_20_DEGREES, use_astropy=True): + """ Converting the arrival time, the sky localization, and the polarization + from the geocentric frame to the LISA frame. + + Parameters + ---------- + t_geo : float or numpy.array + The time when a GW signal arrives at the origin of geocentric frame. + In the unit of 's'. + longitude_geo : float or numpy.array + The longitude of a GW signal in geocentric frame. + In the unit of 'radian'. + latitude_geo : float or numpy.array + The latitude of a GW signal in geocentric frame. + In the unit of 'radian'. + polarization_geo : float or numpy.array + The polarization angle of a GW signal in geocentric frame. + In the unit of 'radian'. + t0 : float + The initial time offset of LISA, in the unit of 's', + default is 7365189.431698299. This makes sure LISA is behind + the Earth by 19-23 degrees. + use_astropy : bool + Using Astropy to calculate the sky localization or not. + Default is True. + + Returns + ------- + (t_lisa, longitude_lisa, latitude_lisa, polarization_lisa) : tuple + t_lisa : float or numpy.array + The time when a GW signal arrives at the origin of LISA frame. + In the unit of 's'. + longitude_lisa : float or numpy.array + The longitude of a GW signal in LISA frame, in the unit of 'radian'. 
+ latitude_lisa : float or numpy.array + The latitude of a GW signal in LISA frame, in the unit of 'radian'. + polarization_lisa : float or numpy.array + The polarization angle of a GW signal in LISA frame. + In the unit of 'radian'. + """ + t_ssb, longitude_ssb, latitude_ssb, polarization_ssb = geo_to_ssb( + t_geo, longitude_geo, latitude_geo, polarization_geo, use_astropy) + t_lisa, longitude_lisa, latitude_lisa, polarization_lisa = ssb_to_lisa( + t_ssb, longitude_ssb, latitude_ssb, polarization_ssb, t0) + + return (t_lisa, longitude_lisa, latitude_lisa, polarization_lisa) + + +__all__ = ['TIME_OFFSET_20_DEGREES', + 'localization_to_propagation_vector', + 'propagation_vector_to_localization', 'polarization_newframe', + 't_lisa_from_ssb', 't_ssb_from_t_lisa', + 'ssb_to_lisa', 'lisa_to_ssb', + 'rotation_matrix_ssb_to_lisa', 'rotation_matrix_ssb_to_geo', + 'lisa_position_ssb', 'earth_position_ssb', + 't_geo_from_ssb', 't_ssb_from_t_geo', 'ssb_to_geo', 'geo_to_ssb', + 'lisa_to_geo', 'geo_to_lisa', + ] diff --git a/pycbc/events/coinc.py b/pycbc/events/coinc.py index 7b8ee9840d2..aa50554344d 100644 --- a/pycbc/events/coinc.py +++ b/pycbc/events/coinc.py @@ -35,14 +35,6 @@ from .eventmgr_cython import timecoincidence_findidxlen from .eventmgr_cython import timecluster_cython -# Mapping used in background_bin_from_string to select approximant for -# duration function, if duration-based binning is used. -_APPROXIMANT_DURATION_MAP = { - 'SEOBNRv2duration': 'SEOBNRv2', - 'SEOBNRv4duration': 'SEOBNRv4', - 'SEOBNRv5duration': 'SEOBNRv5_ROM' -} - def background_bin_from_string(background_bins, data): """ Return template ids for each bin as defined by the format string @@ -104,7 +96,7 @@ def background_bin_from_string(background_bins, data): elif bin_type == 'chi_eff': vals = pycbc.conversions.chi_eff(data['mass1'], data['mass2'], data['spin1z'], data['spin2z']) - elif bin_type in ['SEOBNRv2Peak', 'SEOBNRv4Peak']: + elif bin_type.endswith('Peak'): vals = pycbc.pnutils.get_freq( 'f' + bin_type, data['mass1'], @@ -113,14 +105,14 @@ def background_bin_from_string(background_bins, data): data['spin2z'] ) cached_values[bin_type] = vals - elif bin_type in _APPROXIMANT_DURATION_MAP: + elif bin_type.endswith('duration'): vals = pycbc.pnutils.get_imr_duration( data['mass1'], data['mass2'], data['spin1z'], data['spin2z'], data['f_lower'], - approximant=_APPROXIMANT_DURATION_MAP[bin_type] + approximant=bin_type.replace('duration', '') ) cached_values[bin_type] = vals else: @@ -990,6 +982,14 @@ def insert_args(parser): group.add_argument('--ifar-remove-threshold', type=float, help="NOT YET IMPLEMENTED", default=100.0) + @staticmethod + def verify_args(args, parser): + """Verify that psd-var-related options are consistent""" + if ((hasattr(args, 'psd_variation') and not args.psd_variation) + and 'psdvar' in args.sngl_ranking): + parser.error(f"The single ifo ranking stat {args.sngl_ranking} " + "requires --psd-variation.") + @property def background_time(self): """Return the amount of background time that the buffers contain""" diff --git a/pycbc/events/eventmgr.py b/pycbc/events/eventmgr.py index 56ad265c1d1..9d84af403f9 100644 --- a/pycbc/events/eventmgr.py +++ b/pycbc/events/eventmgr.py @@ -596,7 +596,7 @@ def add_template_events_to_ifo(self, ifo, columns, vectors): class EventManagerCoherent(EventManagerMultiDetBase): def __init__(self, opt, ifos, column, column_types, network_column, - network_column_types, psd=None, **kwargs): + network_column_types, time_slides, psd=None, **kwargs): 
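    # Editorial note (not part of the patch): with this signature change,
    # callers must now supply the time_slides mapping, as in the
    # pycbc_multi_inspiral hunk earlier in this diff:
    #
    #     event_mgr = EventManagerCoherent(
    #         args, args.instruments,
    #         ifo_names, [ifo_out_types[n] for n in ifo_names],
    #         network_names, [network_out_types[n] for n in network_names],
    #         time_slides=time_slides)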
super(EventManagerCoherent, self).__init__( opt, ifos, column, column_types, psd=None, **kwargs) self.network_event_dtype = \ @@ -612,6 +612,7 @@ def __init__(self, opt, ifos, column, column_types, network_column, self.event_index['network'] = 0 self.template_event_dict['network'] = numpy.array( [], dtype=self.network_event_dtype) + self.time_slides = time_slides def cluster_template_network_events(self, tcolumn, column, window_size, slide=0): @@ -727,6 +728,7 @@ def __setitem__(self, name, data): float(self.opt.sample_rate[ifo_str]) + \ self.opt.gps_start_time[ifo_str] f['time_index'] = ifo_events['time_index'] + f['slide_id'] = ifo_events['slide_id'] try: # Precessing template_sigmasq_plus = numpy.array( @@ -772,7 +774,7 @@ def __setitem__(self, name, data): f['chisq_dof'] = numpy.zeros(len(ifo_events)) f['template_hash'] = th[tid] - + f['search/time_slides'] = numpy.array(self.time_slides[ifo]) if self.opt.trig_start_time: f['search/start_time'] = numpy.array([ self.opt.trig_start_time[ifo]], dtype=numpy.int32) diff --git a/pycbc/events/stat.py b/pycbc/events/stat.py index 6966e2586ae..4151571e356 100644 --- a/pycbc/events/stat.py +++ b/pycbc/events/stat.py @@ -211,6 +211,9 @@ def rank_stat_single(self, single_info, """ Calculate the statistic for a single detector candidate + For this statistic this is just passing through the + single value, which will be the second entry in the tuple. + Parameters ---------- single_info: tuple @@ -222,7 +225,7 @@ def rank_stat_single(self, single_info, numpy.ndarray The array of single detector statistics """ - return self.single(single_info[1]) + return single_info[1] def rank_stat_coinc(self, sngls_list, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument @@ -663,6 +666,9 @@ def rank_stat_single(self, single_info, """ Calculate the statistic for a single detector candidate + For this statistic this is just passing through the + single value, which will be the second entry in the tuple. 
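With this change ``rank_stat_single`` expects pre-computed single-detector statistic values rather than raw trigger parameters, so callers first apply ``single()`` to the trigger dictionary; a sketch of the new calling pattern (cf. the ``pycbc/io/hdf.py`` hunk below):

    sds = rank_method.single(trig_dict)
    stat = rank_method.rank_stat_single((ifo, sds))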
+ Parameters ---------- single_info: tuple @@ -674,7 +680,7 @@ def rank_stat_single(self, single_info, numpy.ndarray The array of single detector statistics """ - return self.single(single_info[1]) + return single_info[1] def rank_stat_coinc(self, sngls_list, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument diff --git a/pycbc/io/hdf.py b/pycbc/io/hdf.py index ebecaec5c6a..754c63bf663 100644 --- a/pycbc/io/hdf.py +++ b/pycbc/io/hdf.py @@ -517,7 +517,8 @@ def mask_to_n_loudest_clustered_events(self, rank_method, be considered.""" # If this becomes memory intensive we can optimize - stat = rank_method.rank_stat_single((self.ifo, self.trig_dict())) + sds = rank_method.single(self.trig_dict()) + stat = rank_method.rank_stat_single((self.ifo, sds)) if len(stat) == 0: # No triggers, so just return here self.stat = np.array([]) diff --git a/pycbc/live/snr_optimizer.py b/pycbc/live/snr_optimizer.py index 5924d067304..389b0c6ca6f 100644 --- a/pycbc/live/snr_optimizer.py +++ b/pycbc/live/snr_optimizer.py @@ -166,6 +166,7 @@ def compute_minus_network_snr(v, *argv): if len(argv) == 1: argv = argv[0] nsnr, _ = compute_network_snr_core(v, *argv) + logging.debug('snr: %s', nsnr) return -nsnr @@ -177,27 +178,25 @@ def compute_minus_network_snr_pso(v, *argv, **kwargs): return -nsnr_array -def normalize_initial_point(initial_point, bounds): - return (initial_point - bounds[:,0]) / (bounds[:,1] - bounds[:,0]) - - def optimize_di(bounds, cli_args, extra_args, initial_point): + # Convert from dict to array with parameters in a given order bounds = numpy.array([ bounds['mchirp'], bounds['eta'], bounds['spin1z'], bounds['spin2z'] ]) - - # Currently only implemented for random seed initial array - rng = numpy.random.mtrand._rand - population_shape = (int(cli_args.snr_opt_di_popsize), 4) - population = rng.uniform(size=population_shape) + # Initialize the population with random values within specified bounds + population = numpy.random.uniform( + bounds[:, 0], + bounds[:, 1], + size=(int(cli_args.snr_opt_di_popsize), len(bounds)) + ) if cli_args.snr_opt_include_candidate: - # Re-normalize the initial point into the correct range - point_init = normalize_initial_point(initial_point, bounds) # add the initial point to the population - population = numpy.concatenate((population[:-1], point_init)) + population = numpy.concatenate((population[:-1], + initial_point)) + logging.debug('Initial population: %s', population) results = differential_evolution( compute_minus_network_snr, @@ -232,11 +231,6 @@ def optimize_shgo(bounds, cli_args, extra_args, initial_point): # pylint: disabl return results.x -def normalize_population(population, min_bounds, max_bounds): - norm_pop = min_bounds + population * (max_bounds - min_bounds) - return norm_pop - - def optimize_pso(bounds, cli_args, extra_args, initial_point): options = { 'c1': cli_args.snr_opt_pso_c1, @@ -256,17 +250,18 @@ def optimize_pso(bounds, cli_args, extra_args, initial_point): bounds['spin2z'][1] ]) - # Manually generate the initial points, this is the same as the default - # method, but allows us to make some modifications + # Initialize the population with random values within specified bounds population = numpy.random.uniform( - low=0.0, high=1.0, size=(int(cli_args.snr_opt_pso_particles), 4) + min_bounds, + max_bounds, + size=(int(cli_args.snr_opt_pso_particles), len(bounds)) ) - population = normalize_population(population, min_bounds, max_bounds) if cli_args.snr_opt_include_candidate: # add the initial point to the population population = 
numpy.concatenate((population[:-1], initial_point)) + logging.debug('Initial population: %s', population) optimizer = ps.single.GlobalBestPSO( n_particles=int(cli_args.snr_opt_pso_particles), diff --git a/pycbc/pnutils.py b/pycbc/pnutils.py index 95900040a59..9582ce7414e 100644 --- a/pycbc/pnutils.py +++ b/pycbc/pnutils.py @@ -521,7 +521,7 @@ def frequency_cutoff_from_name(name, m1, m2, s1z, s2z): f : float or numpy.array Frequency in Hz """ - params = {"mass1":m1, "mass2":m2, "spin1z":s1z, "spin2z":s2z} + params = {"mass1": m1, "mass2": m2, "spin1z": s1z, "spin2z": s2z} return named_frequency_cutoffs[name](params) def _get_imr_duration(m1, m2, s1z, s2z, f_low, approximant="SEOBNRv4"): @@ -532,20 +532,20 @@ def _get_imr_duration(m1, m2, s1z, s2z, f_low, approximant="SEOBNRv4"): chi = lalsim.SimIMRPhenomBComputeChi(m1, m2, s1z, s2z) time_length = lalsim.SimIMRSEOBNRv2ChirpTimeSingleSpin( m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, chi, f_low) - elif approximant == 'IMRPhenomXAS': + elif approximant == "IMRPhenomXAS": time_length = lalsim.SimIMRPhenomXASDuration( m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z, f_low) elif approximant == "IMRPhenomD": time_length = lalsim.SimIMRPhenomDChirpTime( m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z, f_low) - elif approximant == "SEOBNRv4": - # NB for no clear reason this function has f_low as first argument + elif approximant in ["SEOBNRv4", "SEOBNRv4_ROM"]: + # NB the LALSim function has f_low as first argument time_length = lalsim.SimIMRSEOBNRv4ROMTimeOfFrequency( f_low, m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z) - elif approximant == 'SEOBNRv5_ROM': + elif approximant in ["SEOBNRv5", "SEOBNRv5_ROM"]: time_length = lalsim.SimIMRSEOBNRv5ROMTimeOfFrequency( f_low, m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z) - elif approximant == 'SPAtmplt' or approximant == 'TaylorF2': + elif approximant in ["SPAtmplt", "TaylorF2"]: chi = lalsim.SimInspiralTaylorF2ReducedSpinComputeChi( m1, m2, s1z, s2z ) diff --git a/pycbc/psd/variation.py b/pycbc/psd/variation.py index 50a006faf99..993c5b4d212 100644 --- a/pycbc/psd/variation.py +++ b/pycbc/psd/variation.py @@ -3,11 +3,48 @@ import numpy from numpy.fft import rfft, irfft import scipy.signal as sig - +from scipy.interpolate import interp1d import pycbc.psd from pycbc.types import TimeSeries -from pycbc.filter import resample_to_delta_t + + +def create_full_filt(freqs, filt, plong, srate, psd_duration): + """Create a filter to convolve with strain data to find PSD variation. + + Parameters + ---------- + freqs : numpy.ndarray + Array of sample frequencies of the PSD. + filt : numpy.ndarray + A bandpass filter. + plong : numpy.ndarray + The estimated PSD. + srate : float + The sample rate of the data. + psd_duration : float + The duration of the estimated PSD. + + Returns + ------- + full_filt : numpy.ndarray + The full filter used to calculate PSD variation. + """ + + # Make the weighting filter - bandpass, which weight by f^-7/6, + # and whiten. The normalization is chosen so that the variance + # will be one if this filter is applied to white noise which + # already has a variance of one. + fweight = freqs ** (-7./6.) * filt / numpy.sqrt(plong) + fweight[0] = 0. + norm = (sum(abs(fweight) ** 2) / (len(fweight) - 1.)) ** -0.5 + fweight = norm * fweight + fwhiten = numpy.sqrt(2. / srate) / numpy.sqrt(plong) + fwhiten[0] = 0. 
+    full_filt = sig.hann(int(psd_duration * srate)) * numpy.roll(
+        irfft(fwhiten * fweight), int(psd_duration / 2) * srate)
+
+    return full_filt
 
 
 def mean_square(data, delta_t, srate, short_stride, stride):
@@ -113,9 +150,6 @@ def calc_filt_psd_variation(strain, segment, short_segment, psd_long_segment,
     # Convert start and end times immediately to floats
     start_time = float(strain.start_time)
     end_time = float(strain.end_time)
-
-    # Resample the data
-    strain = resample_to_delta_t(strain, 1.0 / 2048)
     srate = int(strain.sample_rate)
 
     # Fix the step for the PSD estimation and the time to remove at the
@@ -158,18 +192,7 @@ def calc_filt_psd_variation(strain, segment, short_segment, psd_long_segment,
             freqs = numpy.array(plong.sample_frequencies, dtype=fs_dtype)
             plong = plong.numpy()
 
-            # Make the weighting filter - bandpass, which weight by f^-7/6,
-            # and whiten. The normalization is chosen so that the variance
-            # will be one if this filter is applied to white noise which
-            # already has a variance of one.
-            fweight = freqs ** (-7./6.) * filt / numpy.sqrt(plong)
-            fweight[0] = 0.
-            norm = (sum(abs(fweight) ** 2) / (len(fweight) - 1.)) ** -0.5
-            fweight = norm * fweight
-            fwhiten = numpy.sqrt(2. / srate) / numpy.sqrt(plong)
-            fwhiten[0] = 0.
-            full_filt = sig.hann(int(psd_duration * srate)) * numpy.roll(
-                irfft(fwhiten * fweight), int(psd_duration / 2) * srate)
+            full_filt = create_full_filt(freqs, filt, plong, srate, psd_duration)
 
             # Convolve the filter with long segment of data
             wstrain = sig.fftconvolve(astrain, full_filt, mode='same')
             wstrain = wstrain[int(strain_crop * srate):-int(strain_crop * srate)]
@@ -210,10 +233,171 @@ def find_trigger_value(psd_var, idx, start, sample_rate):
     # Extract the PSD variation at trigger time through linear
     # interpolation
     if not hasattr(psd_var, 'cached_psd_var_interpolant'):
-        from scipy import interpolate
         psd_var.cached_psd_var_interpolant = \
-            interpolate.interp1d(psd_var.sample_times.numpy(), psd_var.numpy(),
-                                 fill_value=1.0, bounds_error=False)
+            interp1d(psd_var.sample_times.numpy(),
+                     psd_var.numpy(),
+                     fill_value=1.0,
+                     bounds_error=False)
     vals = psd_var.cached_psd_var_interpolant(time)
 
     return vals
+
+
+def live_create_filter(psd_estimated,
+                       psd_duration,
+                       sample_rate,
+                       low_freq=20,
+                       high_freq=480):
+    """
+    Create a filter to be used in the calculation of the psd variation for the
+    PyCBC Live search. This filter combines a bandpass between a lower and
+    upper frequency and an estimated signal response so that the variance
+    will be 1 when the filter is applied to white noise.
+
+    Within the PyCBC Live search this filter needs to be recreated every time
+    the estimated psd is updated and needs to be unique for each detector.
+
+    Parameters
+    ----------
+    psd_estimated : pycbc.frequencyseries
+        The current PyCBC Live PSD: variations are measured relative to this
+        estimate.
+    psd_duration : float
+        The duration of the estimation of the psd, in seconds.
+    sample_rate : int
+        The sample rate of the strain data being searched over.
+    low_freq : int (default = 20)
+        The lower frequency to apply in the bandpass filter.
+    high_freq : int (default = 480)
+        The upper frequency to apply in the bandpass filter.
+
+    Returns
+    -------
+    full_filt : numpy.ndarray
+        The complete filter to be convolved with the strain data to
+        find the psd variation value.
+
+    """
+
+    # Create a bandpass filter between low_freq and high_freq once
+    filt = sig.firwin(4 * sample_rate,
+                      [low_freq, high_freq],
+                      pass_zero=False,
+                      window='hann',
+                      nyq=sample_rate / 2)
+    filt.resize(int(psd_duration * sample_rate))
+
+    # Fourier transform the filter and take the absolute value to get
+    # rid of the phase.
+    filt = abs(rfft(filt))
+
+    # Extract the psd frequencies to create a representative filter.
+    freqs = numpy.array(psd_estimated.sample_frequencies, dtype=numpy.float32)
+    plong = psd_estimated.numpy()
+    full_filt = create_full_filt(freqs, filt, plong, sample_rate, psd_duration)
+
+    return full_filt
+
+
+def live_calc_psd_variation(strain,
+                            full_filt,
+                            increment,
+                            data_trim=2.0,
+                            short_stride=0.25):
+    """
+    Calculate the psd variation in the PyCBC Live search.
+
+    The Live strain data is convolved with the filter to produce a timeseries
+    containing the PSD variation values for each sample. The mean square of
+    the timeseries is calculated over the short_stride to find outliers caused
+    by short duration glitches. Outliers are replaced with the average of
+    adjacent elements in the array. This array is then further averaged every
+    second to produce the PSD variation timeseries.
+
+    Parameters
+    ----------
+    strain : pycbc.timeseries
+        Live data being searched through by the PyCBC Live search.
+    full_filt : numpy.ndarray
+        A filter created by `live_create_filter`.
+    increment : float
+        The number of seconds in each increment in the PyCBC Live search.
+    data_trim : float
+        The number of seconds to be trimmed from either end of the convolved
+        timeseries to prevent artefacts.
+    short_stride : float
+        The number of seconds to average the PSD variation timeseries over to
+        remove the effects of short duration glitches.
+
+    Returns
+    -------
+    psd_var : pycbc.timeseries
+        A timeseries containing the PSD variation values.
+
+    """
+    sample_rate = int(strain.sample_rate)
+
+    # Grab the last increment's worth of data, plus padding for edge effects.
+    astrain = strain.time_slice(strain.end_time - increment - (data_trim * 3),
+                                strain.end_time)
+
+    # Convolve the data and the filter to produce the PSD variation timeseries,
+    # then trim the beginning and end of the data to prevent edge effects.
+    wstrain = sig.fftconvolve(astrain, full_filt, mode='same')
+    wstrain = wstrain[int(data_trim * sample_rate):-int(data_trim * sample_rate)]
+
+    # Create a PSD variation array by taking the mean square of the PSD
+    # variation timeseries every short_stride
+    short_ms = numpy.mean(
+        wstrain.reshape(-1, int(sample_rate * short_stride)) ** 2, axis=1)
+
+    # Define an array of averages that is used to substitute outliers
+    ave = 0.5 * (short_ms[2:] + short_ms[:-2])
+    outliers = short_ms[1:-1] > (2. * ave)
+    short_ms[1:-1][outliers] = ave[outliers]
+
+    # Calculate the PSD variation every second by a moving window average
+    # containing (1/short_stride) short_ms samples.
+    m_s = []
+    samples_per_second = 1 / short_stride
+    for idx in range(int(len(short_ms) / samples_per_second)):
+        start = int(samples_per_second * idx)
+        end = int(samples_per_second * (idx + 1))
+        m_s.append(numpy.mean(short_ms[start:end]))
+
+    m_s = numpy.array(m_s, dtype=wstrain.dtype)
+    psd_var = TimeSeries(m_s,
+                         delta_t=1.0,
+                         epoch=strain.end_time - increment - (data_trim * 2))
+
+    return psd_var
+
+
+def live_find_var_value(triggers,
+                        psd_var_timeseries):
+    """
+    Extract the PSD variation values at trigger times by linear interpolation.
+ + Parameters + ---------- + triggers : dict + Dictionary containing input trigger times. + psd_var_timeseries : pycbc.timeseries + A timeseries containing the PSD variation value for each second of the + latest increment in PyCBC Live. + + Returns + ------- + psd_var_vals : numpy.ndarray + Array of interpolated PSD variation values at trigger times. + """ + + # Create the interpolator + interpolator = interp1d(psd_var_timeseries.sample_times.numpy(), + psd_var_timeseries.numpy(), + fill_value=1.0, + bounds_error=False) + # Evaluate at the trigger times + psd_var_vals = interpolator(triggers['end_time']) + + return psd_var_vals diff --git a/pycbc/results/pygrb_postprocessing_utils.py b/pycbc/results/pygrb_postprocessing_utils.py index 999b44174e4..cc1a4416969 100644 --- a/pycbc/results/pygrb_postprocessing_utils.py +++ b/pycbc/results/pygrb_postprocessing_utils.py @@ -615,19 +615,18 @@ def load_injections(inj_file, vetoes, sim_table=False, label=None): # ============================================================================= # Function to load timeslides # ============================================================================= -def load_time_slides(xml_file): +def load_time_slides(hdf_file_path): """Loads timeslides from PyGRB output file as a dictionary""" + hdf_file = h5py.File(hdf_file_path, 'r') + ifos = extract_ifos(hdf_file_path) + ids = numpy.arange(len(hdf_file[f'{ifos[0]}/search/time_slides'])) + time_slide_dict = { + slide_id: { + ifo: hdf_file[f'{ifo}/search/time_slides'][slide_id] + for ifo in ifos} + for slide_id in ids} - # Get all timeslides: these are number_of_ifos * number_of_timeslides - time_slide = load_xml_table(xml_file, glsctables.TimeSlideTable.tableName) - # Get a list of unique timeslide dictionaries - time_slide_list = [dict(i) for i in time_slide.as_dict().values()] - # Turn it into a dictionary indexed by the timeslide ID - time_slide_dict = {int(time_slide.get_time_slide_id(ov)): ov - for ov in time_slide_list} # Check time_slide_ids are ordered correctly. 
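A note on the live PSD-variation helpers added to pycbc/psd/variation.py above: they are designed to chain together once per analysis increment. A minimal sketch, with illustrative durations (8 s PSD segments, 8 s increments) and synthetic white noise standing in for real strain data:

    import numpy
    from pycbc.types import TimeSeries
    from pycbc.psd import welch
    from pycbc.psd.variation import (live_create_filter,
                                     live_calc_psd_variation,
                                     live_find_var_value)

    sample_rate = 2048
    strain = TimeSeries(numpy.random.normal(size=64 * sample_rate),
                        delta_t=1.0 / sample_rate)
    psd = welch(strain, seg_len=8 * sample_rate, seg_stride=4 * sample_rate)

    full_filt = live_create_filter(psd, psd_duration=8,
                                   sample_rate=sample_rate)
    psd_var = live_calc_psd_variation(strain, full_filt, increment=8)
    # For stationary Gaussian noise the interpolated values should be
    # close to 1 at any time inside the increment
    vals = live_find_var_value(
        {'end_time': numpy.array([float(strain.end_time) - 10.0])}, psd_var)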
- ids = _get_id_numbers(time_slide, - "time_slide_id")[::len(time_slide_dict[0].keys())] if not (numpy.all(ids[1:] == numpy.array(ids[:-1])+1) and ids[0] == 0): err_msg = "time_slide_ids list should start at zero and increase by " err_msg += "one for every element" diff --git a/pycbc/transforms.py b/pycbc/transforms.py index d4e9f79894e..c8fa44a9a9b 100644 --- a/pycbc/transforms.py +++ b/pycbc/transforms.py @@ -1468,6 +1468,402 @@ def from_config(cls, cp, section, outputs): ) +class GEOToSSB(BaseTransform): + """Converts arrival time, sky localization, and polarization angle in the + geocentric frame to the corresponding values in the SSB frame.""" + + name = "geo_to_ssb" + + default_params_name = { + 'default_tc_geo': parameters.tc, + 'default_longitude_geo': parameters.ra, + 'default_latitude_geo': parameters.dec, + 'default_polarization_geo': parameters.polarization, + 'default_tc_ssb': parameters.tc, + 'default_longitude_ssb': parameters.eclipticlongitude, + 'default_latitude_ssb': parameters.eclipticlatitude, + 'default_polarization_ssb': parameters.polarization + } + + def __init__( + self, tc_geo_param=None, longitude_geo_param=None, + latitude_geo_param=None, polarization_geo_param=None, + tc_ssb_param=None, longitude_ssb_param=None, + latitude_ssb_param=None, polarization_ssb_param=None + ): + params = [tc_geo_param, longitude_geo_param, + latitude_geo_param, polarization_geo_param, + tc_ssb_param, longitude_ssb_param, + latitude_ssb_param, polarization_ssb_param] + + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_geo_param = params[0] + self.longitude_geo_param = params[1] + self.latitude_geo_param = params[2] + self.polarization_geo_param = params[3] + self.tc_ssb_param = params[4] + self.longitude_ssb_param = params[5] + self.latitude_ssb_param = params[6] + self.polarization_ssb_param = params[7] + self._inputs = [self.tc_geo_param, self.longitude_geo_param, + self.latitude_geo_param, self.polarization_geo_param] + self._outputs = [self.tc_ssb_param, self.longitude_ssb_param, + self.latitude_ssb_param, self.polarization_ssb_param] + + super(GEOToSSB, self).__init__() + + def transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the geocentric frame to the corresponding + values in the SSB frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_ssb_param], out[self.longitude_ssb_param], \ + out[self.latitude_ssb_param], out[self.polarization_ssb_param] = \ + coordinates.geo_to_ssb( + maps[self.tc_geo_param], maps[self.longitude_geo_param], + maps[self.latitude_geo_param], maps[self.polarization_geo_param] + ) + return self.format_output(maps, out) + + def inverse_transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the SSB frame to the corresponding + values in the geocentric frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. 
+ """ + out = {} + out[self.tc_geo_param], out[self.longitude_geo_param], \ + out[self.latitude_geo_param], out[self.polarization_geo_param] = \ + coordinates.ssb_to_geo( + maps[self.tc_ssb_param], maps[self.longitude_ssb_param], + maps[self.latitude_ssb_param], maps[self.polarization_ssb_param] + ) + return self.format_output(maps, out) + + @classmethod + def from_config(cls, cp, section, outputs): + tag = outputs + skip_opts = [] + additional_opts = {} + + # get custom variable names + variables = { + 'tc-geo': cls.default_params_name['default_tc_geo'], + 'longitude-geo': cls.default_params_name['default_longitude_geo'], + 'latitude-geo': cls.default_params_name['default_latitude_geo'], + 'polarization-geo': cls.default_params_name[ + 'default_polarization_geo'], + 'tc-ssb': cls.default_params_name['default_tc_ssb'], + 'longitude-ssb': cls.default_params_name['default_longitude_ssb'], + 'latitude-ssb': cls.default_params_name['default_latitude_ssb'], + 'polarization-ssb': cls.default_params_name[ + 'default_polarization_ssb'] + } + for param_name in variables.keys(): + name_underline = param_name.replace('-', '_') + if cp.has_option("-".join([section, outputs]), param_name): + skip_opts.append(param_name) + additional_opts.update( + {name_underline+'_param': cp.get_opt_tag( + section, param_name, tag)}) + else: + additional_opts.update( + {name_underline+'_param': variables[param_name]}) + + return super(GEOToSSB, cls).from_config( + cp, section, outputs, skip_opts=skip_opts, + additional_opts=additional_opts + ) + + +class LISAToSSB(BaseTransform): + """Converts arrival time, sky localization, and polarization angle in the + LISA frame to the corresponding values in the SSB frame.""" + + name = "lisa_to_ssb" + + default_params_name = { + 'default_tc_lisa': parameters.tc, + 'default_longitude_lisa': parameters.eclipticlongitude, + 'default_latitude_lisa': parameters.eclipticlatitude, + 'default_polarization_lisa': parameters.polarization, + 'default_tc_ssb': parameters.tc, + 'default_longitude_ssb': parameters.eclipticlongitude, + 'default_latitude_ssb': parameters.eclipticlatitude, + 'default_polarization_ssb': parameters.polarization + } + + def __init__( + self, tc_lisa_param=None, longitude_lisa_param=None, + latitude_lisa_param=None, polarization_lisa_param=None, + tc_ssb_param=None, longitude_ssb_param=None, + latitude_ssb_param=None, polarization_ssb_param=None + ): + params = [tc_lisa_param, longitude_lisa_param, + latitude_lisa_param, polarization_lisa_param, + tc_ssb_param, longitude_ssb_param, + latitude_ssb_param, polarization_ssb_param] + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_lisa_param = params[0] + self.longitude_lisa_param = params[1] + self.latitude_lisa_param = params[2] + self.polarization_lisa_param = params[3] + self.tc_ssb_param = params[4] + self.longitude_ssb_param = params[5] + self.latitude_ssb_param = params[6] + self.polarization_ssb_param = params[7] + self._inputs = [self.tc_lisa_param, self.longitude_lisa_param, + self.latitude_lisa_param, self.polarization_lisa_param] + self._outputs = [self.tc_ssb_param, self.longitude_ssb_param, + self.latitude_ssb_param, self.polarization_ssb_param] + super(LISAToSSB, self).__init__() + + def transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the LISA frame to the corresponding + values in the SSB frame. 
+ + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_ssb_param], out[self.longitude_ssb_param], \ + out[self.latitude_ssb_param], out[self.polarization_ssb_param] = \ + coordinates.lisa_to_ssb( + maps[self.tc_lisa_param], maps[self.longitude_lisa_param], + maps[self.latitude_lisa_param], maps[self.polarization_lisa_param] + ) + return self.format_output(maps, out) + + def inverse_transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the SSB frame to the corresponding + values in the LISA frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_lisa_param], out[self.longitude_lisa_param], \ + out[self.latitude_lisa_param], \ + out[self.polarization_lisa_param] = \ + coordinates.ssb_to_lisa( + maps[self.tc_ssb_param], maps[self.longitude_ssb_param], + maps[self.latitude_ssb_param], maps[self.polarization_ssb_param] + ) + return self.format_output(maps, out) + + @classmethod + def from_config(cls, cp, section, outputs): + tag = outputs + skip_opts = [] + additional_opts = {} + + # get custom variable names + variables = { + 'tc-lisa': cls.default_params_name['default_tc_lisa'], + 'longitude-lisa': cls.default_params_name[ + 'default_longitude_lisa'], + 'latitude-lisa': cls.default_params_name['default_latitude_lisa'], + 'polarization-lisa': cls.default_params_name[ + 'default_polarization_lisa'], + 'tc-ssb': cls.default_params_name['default_tc_ssb'], + 'longitude-ssb': cls.default_params_name['default_longitude_ssb'], + 'latitude-ssb': cls.default_params_name['default_latitude_ssb'], + 'polarization-ssb': cls.default_params_name[ + 'default_polarization_ssb'] + } + for param_name in variables.keys(): + name_underline = param_name.replace('-', '_') + if cp.has_option("-".join([section, outputs]), param_name): + skip_opts.append(param_name) + additional_opts.update( + {name_underline+'_param': cp.get_opt_tag( + section, param_name, tag)}) + else: + additional_opts.update( + {name_underline+'_param': variables[param_name]}) + + return super(LISAToSSB, cls).from_config( + cp, section, outputs, skip_opts=skip_opts, + additional_opts=additional_opts + ) + + +class LISAToGEO(BaseTransform): + """Converts arrival time, sky localization, and polarization angle in the + LISA frame to the corresponding values in the geocentric frame.""" + + name = "lisa_to_geo" + + default_params_name = { + 'default_tc_lisa': parameters.tc, + 'default_longitude_lisa': parameters.eclipticlongitude, + 'default_latitude_lisa': parameters.eclipticlatitude, + 'default_polarization_lisa': parameters.polarization, + 'default_tc_geo': parameters.tc, + 'default_longitude_geo': parameters.ra, + 'default_latitude_geo': parameters.dec, + 'default_polarization_geo': parameters.polarization + } + + def __init__( + self, tc_lisa_param=None, longitude_lisa_param=None, + latitude_lisa_param=None, polarization_lisa_param=None, + tc_geo_param=None, longitude_geo_param=None, + latitude_geo_param=None, polarization_geo_param=None + ): + params = [tc_lisa_param, longitude_lisa_param, + latitude_lisa_param, polarization_lisa_param, + tc_geo_param, longitude_geo_param, + latitude_geo_param, polarization_geo_param] + for index in range(len(params)): + if 
params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_lisa_param = params[0] + self.longitude_lisa_param = params[1] + self.latitude_lisa_param = params[2] + self.polarization_lisa_param = params[3] + self.tc_geo_param = params[4] + self.longitude_geo_param = params[5] + self.latitude_geo_param = params[6] + self.polarization_geo_param = params[7] + self._inputs = [self.tc_lisa_param, self.longitude_lisa_param, + self.latitude_lisa_param, self.polarization_lisa_param] + self._outputs = [self.tc_geo_param, self.longitude_geo_param, + self.latitude_geo_param, self.polarization_geo_param] + super(LISAToGEO, self).__init__() + + def transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the LISA frame to the corresponding + values in the geocentric frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_geo_param], out[self.longitude_geo_param], \ + out[self.latitude_geo_param], out[self.polarization_geo_param] = \ + coordinates.lisa_to_geo( + maps[self.tc_lisa_param], maps[self.longitude_lisa_param], + maps[self.latitude_lisa_param], maps[self.polarization_lisa_param] + ) + return self.format_output(maps, out) + + def inverse_transform(self, maps): + """This function transforms arrival time, sky localization, + and polarization angle in the geocentric frame to the corresponding + values in the LISA frame. + + Parameters + ---------- + maps : a mapping object + + Returns + ------- + out : dict + A dict with key as parameter name and value as numpy.array or float + of transformed values. + """ + out = {} + out[self.tc_lisa_param], out[self.longitude_lisa_param], \ + out[self.latitude_lisa_param], \ + out[self.polarization_lisa_param] = \ + coordinates.geo_to_lisa( + maps[self.tc_geo_param], maps[self.longitude_geo_param], + maps[self.latitude_geo_param], maps[self.polarization_geo_param] + ) + return self.format_output(maps, out) + + @classmethod + def from_config(cls, cp, section, outputs): + tag = outputs + skip_opts = [] + additional_opts = {} + + # get custom variable names + variables = { + 'tc-lisa': cls.default_params_name['default_tc_lisa'], + 'longitude-lisa': cls.default_params_name[ + 'default_longitude_lisa'], + 'latitude-lisa': cls.default_params_name['default_latitude_lisa'], + 'polarization-lisa': cls.default_params_name[ + 'default_polarization_lisa'], + 'tc-geo': cls.default_params_name['default_tc_geo'], + 'longitude-geo': cls.default_params_name['default_longitude_geo'], + 'latitude-geo': cls.default_params_name['default_latitude_geo'], + 'polarization-geo': cls.default_params_name[ + 'default_polarization_geo'] + } + for param_name in variables.keys(): + name_underline = param_name.replace('-', '_') + if cp.has_option("-".join([section, outputs]), param_name): + skip_opts.append(param_name) + additional_opts.update( + {name_underline+'_param': cp.get_opt_tag( + section, param_name, tag)}) + else: + additional_opts.update( + {name_underline+'_param': variables[param_name]}) + + return super(LISAToGEO, cls).from_config( + cp, section, outputs, skip_opts=skip_opts, + additional_opts=additional_opts + ) + + class Log(BaseTransform): """Applies a log transform from an `inputvar` parameter to an `outputvar` parameter. This is the inverse of the exponent transform. 
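The new transforms all follow the same maps-in, dict-out pattern; a minimal usage sketch with the default parameter names and arbitrary values (the coordinates.geo_to_ssb/ssb_to_geo functions referenced above do the actual conversion):

    from pycbc.transforms import GEOToSSB

    geo_to_ssb = GEOToSSB()
    ssb = geo_to_ssb.transform({'tc': 1126259462.43, 'ra': 1.7,
                                'dec': -0.3, 'polarization': 0.6})
    # ssb now also carries 'eclipticlongitude', 'eclipticlatitude' and the
    # SSB-frame 'tc'/'polarization'; the inverse recovers the inputs
    geo = geo_to_ssb.inverse_transform(ssb)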
@@ -2060,6 +2456,117 @@ class ChiPToCartesianSpin(CartesianSpinToChiP): inverse_jacobian = inverse.jacobian +class SSBToGEO(GEOToSSB): + """The inverse of GEOToSSB.""" + + name = "ssb_to_geo" + inverse = GEOToSSB + transform = inverse.inverse_transform + inverse_transform = inverse.transform + + def __init__( + self, tc_geo_param=None, longitude_geo_param=None, + latitude_geo_param=None, polarization_geo_param=None, + tc_ssb_param=None, longitude_ssb_param=None, + latitude_ssb_param=None, polarization_ssb_param=None + ): + params = [tc_geo_param, longitude_geo_param, + latitude_geo_param, polarization_geo_param, + tc_ssb_param, longitude_ssb_param, + latitude_ssb_param, polarization_ssb_param] + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_geo_param = params[0] + self.longitude_geo_param = params[1] + self.latitude_geo_param = params[2] + self.polarization_geo_param = params[3] + self.tc_ssb_param = params[4] + self.longitude_ssb_param = params[5] + self.latitude_ssb_param = params[6] + self.polarization_ssb_param = params[7] + self._inputs = [self.tc_ssb_param, self.longitude_ssb_param, + self.latitude_ssb_param, self.polarization_ssb_param] + self._outputs = [self.tc_geo_param, self.longitude_geo_param, + self.latitude_geo_param, self.polarization_geo_param] + + +class SSBToLISA(LISAToSSB): + """The inverse of LISAToSSB.""" + + name = "ssb_to_lisa" + inverse = LISAToSSB + transform = inverse.inverse_transform + inverse_transform = inverse.transform + + def __init__( + self, tc_lisa_param=None, longitude_lisa_param=None, + latitude_lisa_param=None, polarization_lisa_param=None, + tc_ssb_param=None, longitude_ssb_param=None, + latitude_ssb_param=None, polarization_ssb_param=None + ): + params = [tc_lisa_param, longitude_lisa_param, + latitude_lisa_param, polarization_lisa_param, + tc_ssb_param, longitude_ssb_param, + latitude_ssb_param, polarization_ssb_param] + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_lisa_param = params[0] + self.longitude_lisa_param = params[1] + self.latitude_lisa_param = params[2] + self.polarization_lisa_param = params[3] + self.tc_ssb_param = params[4] + self.longitude_ssb_param = params[5] + self.latitude_ssb_param = params[6] + self.polarization_ssb_param = params[7] + self._inputs = [self.tc_ssb_param, self.longitude_ssb_param, + self.latitude_ssb_param, self.polarization_ssb_param] + self._outputs = [self.tc_lisa_param, self.longitude_lisa_param, + self.latitude_lisa_param, self.polarization_lisa_param] + + +class GEOToLISA(LISAToGEO): + """The inverse of LISAToGEO.""" + + name = "geo_to_lisa" + inverse = LISAToGEO + transform = inverse.inverse_transform + inverse_transform = inverse.transform + + def __init__( + self, tc_lisa_param=None, longitude_lisa_param=None, + latitude_lisa_param=None, polarization_lisa_param=None, + tc_geo_param=None, longitude_geo_param=None, + latitude_geo_param=None, polarization_geo_param=None + ): + params = [tc_lisa_param, longitude_lisa_param, + latitude_lisa_param, polarization_lisa_param, + tc_geo_param, longitude_geo_param, + latitude_geo_param, polarization_geo_param] + for index in range(len(params)): + if params[index] is None: + key = list(self.default_params_name.keys())[index] + params[index] = self.default_params_name[key] + + self.tc_lisa_param = params[0] + 
self.longitude_lisa_param = params[1]
+        self.latitude_lisa_param = params[2]
+        self.polarization_lisa_param = params[3]
+        self.tc_geo_param = params[4]
+        self.longitude_geo_param = params[5]
+        self.latitude_geo_param = params[6]
+        self.polarization_geo_param = params[7]
+        self._inputs = [self.tc_geo_param, self.longitude_geo_param,
+                        self.latitude_geo_param, self.polarization_geo_param]
+        self._outputs = [self.tc_lisa_param, self.longitude_lisa_param,
+                         self.latitude_lisa_param, self.polarization_lisa_param]
+
+
 class Exponent(Log):
     """Applies an exponent transform to an `inputvar` parameter.
@@ -2202,6 +2709,9 @@ def from_config(cls, cp, section, outputs,
 ChiPToCartesianSpin.inverse = CartesianSpinToChiP
 Log.inverse = Exponent
 Logit.inverse = Logistic
+GEOToSSB.inverse = SSBToGEO
+LISAToSSB.inverse = SSBToLISA
+LISAToGEO.inverse = GEOToLISA
 
 
 #
@@ -2241,6 +2751,12 @@ def from_config(cls, cp, section, outputs,
     LambdaFromTOVFile.name: LambdaFromTOVFile,
     LambdaFromMultipleTOVFiles.name: LambdaFromMultipleTOVFiles,
     AlignTotalSpin.name: AlignTotalSpin,
+    GEOToSSB.name: GEOToSSB,
+    SSBToGEO.name: SSBToGEO,
+    LISAToSSB.name: LISAToSSB,
+    SSBToLISA.name: SSBToLISA,
+    LISAToGEO.name: LISAToGEO,
+    GEOToLISA.name: GEOToLISA,
 }
 
 # standard CBC transforms: these are transforms that do not require input
@@ -2269,6 +2785,9 @@ def from_config(cls, cp, section, outputs,
     PrecessionMassSpinToCartesianSpin(),
     ChiPToCartesianSpin(),
     ChirpDistanceToDistance(),
+    GEOToSSB(),
+    LISAToSSB(),
+    LISAToGEO(),
 ]
 common_cbc_inverse_transforms = [
     _t.inverse()
diff --git a/pycbc/waveform/parameters.py b/pycbc/waveform/parameters.py
index 47cb9b43b0f..200350a0eea 100644
--- a/pycbc/waveform/parameters.py
+++ b/pycbc/waveform/parameters.py
@@ -417,20 +417,22 @@ def docstr(self, prefix='', include_label=True):
                dtype=float, default=0., label=r"$\delta$",
                description="Mean anomaly of the periastron (rad).")
 tc = Parameter("tc",
-                dtype=float, default=None, label=r"$t_c$ (s)",
-                description="Coalescence time (s).")
+               dtype=float, default=None, label=r"$t_c$ (s)",
+               description="Coalescence time (s): when the GW signal "
+                           "reaches the origin of the chosen coordinate system.")
 delta_tc = Parameter("delta_tc",
                      dtype=float, label=r"$\Delta t_c~(\rm{s})$",
                      description="Coalesence time offset.")
 ra = Parameter("ra",
-                dtype=float, default=0., label=r"$\alpha$",
-                description="Right ascension (rad).")
+               dtype=float, default=0., label=r"$\alpha$",
+               description="Right ascension (rad).")
 dec = Parameter("dec",
                 dtype=float, default=0., label=r"$\delta$",
                 description="Declination (rad).")
 polarization = Parameter("polarization",
                          dtype=float, default=0., label=r"$\psi$",
-                         description="Polarization (rad).")
+                         description="Polarization angle (rad) in "
+                                     "the chosen coordinate system.")
 redshift = Parameter("redshift",
                      dtype=float, default=None, label=r"$z$",
                      description="Redshift.")
@@ -438,11 +440,11 @@ def docstr(self, prefix='', include_label=True):
                           label=r"$V_C~(\rm{Mpc}^3)$",
                           description="Comoving volume (in cubic Mpc).")
 eclipticlatitude = Parameter("eclipticlatitude",
-                dtype=float, default=0., label=r"$\beta$",
-                description="eclipticlatitude wrt SSB coords.")
+                dtype=float, default=0., label=r"$\beta$",
+                description="Ecliptic latitude in SSB/LISA coords.")
 eclipticlongitude = Parameter("eclipticlongitude",
-                dtype=float, default=0., label=r"$\lambda$",
-                description="eclipticlongitude wrt SSB coords.")
+                dtype=float, default=0., label=r"$\lambda$",
+                description="Ecliptic longitude in SSB/LISA coords.")
 
 #
 #   Calibration parameters
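Since each from_config above lets an ini section remap variable names, the constructors take the same *_param keywords directly; a sketch with hypothetical sampler variable names (and assuming the default time offset in coordinates.lisa_to_ssb):

    from pycbc.transforms import LISAToSSB

    # Use sampler-specific names instead of the defaults
    transform = LISAToSSB(tc_lisa_param='tc_lisa',
                          longitude_lisa_param='lambda_l',
                          latitude_lisa_param='beta_l',
                          polarization_lisa_param='psi_l')
    out = transform.transform({'tc_lisa': 1126259462.43, 'lambda_l': 1.0,
                               'beta_l': 0.2, 'psi_l': 0.5})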
@@ -553,8 +555,9 @@ def docstr(self, prefix='', include_label=True): # ============================================================================= # -# parameters describing the location of a binary w.r.t. the Earth. Note: we -# do not include distance here. This is because these parameters are not +# parameters describing the location of a binary w.r.t. +# the geocentric/LISA/SSB frame. +# Note: we do not include distance here. This is because these are not # passed to the waveform generators in lalsimulation, but are instead applied # after a waveform is generated. Distance, however, is a parameter used by # the waveform generators. diff --git a/pycbc/workflow/matched_filter.py b/pycbc/workflow/matched_filter.py index 7296cf34a86..eae944511d8 100644 --- a/pycbc/workflow/matched_filter.py +++ b/pycbc/workflow/matched_filter.py @@ -238,6 +238,8 @@ def setup_matchedfltr_dax_generated_multi(workflow, science_segs, datafind_outs, if match_fltr_exe == 'pycbc_multi_inspiral': exe_class = select_matchedfilter_class(match_fltr_exe) + # Right ascension + declination provided in degrees, + # so convert to radians cp.set('inspiral', 'ra', str(radians(float(cp.get('workflow', 'ra'))))) cp.set('inspiral', 'dec', diff --git a/pycbc/workflow/minifollowups.py b/pycbc/workflow/minifollowups.py index 1f16c714e71..5e59533da5d 100644 --- a/pycbc/workflow/minifollowups.py +++ b/pycbc/workflow/minifollowups.py @@ -125,7 +125,8 @@ def setup_foreground_minifollowups(workflow, coinc_file, single_triggers, def setup_single_det_minifollowups(workflow, single_trig_file, tmpltbank_file, insp_segs, insp_data_name, insp_anal_name, dax_output, out_dir, veto_file=None, - veto_segment_name=None, statfiles=None, + veto_segment_name=None, fg_file=None, + fg_name=None, statfiles=None, tags=None): """ Create plots that followup the Nth loudest clustered single detector triggers from a merged single detector trigger HDF file. @@ -192,8 +193,11 @@ def setup_single_det_minifollowups(workflow, single_trig_file, tmpltbank_file, assert(veto_segment_name is not None) node.add_input_opt('--veto-file', veto_file) node.add_opt('--veto-segment-name', veto_segment_name) + if fg_file is not None: + assert(fg_name is not None) + node.add_input_opt('--foreground-censor-file', fg_file) + node.add_opt('--foreground-segment-name', fg_name) if statfiles: - statfiles = statfiles.find_output_with_ifo(curr_ifo) node.add_input_list_opt('--statistic-files', statfiles) if tags: node.add_list_opt('--tags', tags) @@ -555,7 +559,7 @@ def make_coinc_info(workflow, singles, bank, coinc, out_dir, return files def make_sngl_ifo(workflow, sngl_file, bank_file, trigger_id, out_dir, ifo, - title=None, tags=None): + statfiles=None, title=None, tags=None): """Setup a job to create sngl detector sngl ifo html summary snippet. 
""" tags = [] if tags is None else tags @@ -568,6 +572,8 @@ def make_sngl_ifo(workflow, sngl_file, bank_file, trigger_id, out_dir, ifo, node.add_input_opt('--bank-file', bank_file) node.add_opt('--trigger-id', str(trigger_id)) node.add_opt('--instrument', ifo) + if statfiles is not None: + node.add_input_list_opt('--statistic-files', statfiles) if title is not None: node.add_opt('--title', f'"{title}"') node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file') diff --git a/pycbc/workflow/pegasus_sites.py b/pycbc/workflow/pegasus_sites.py index b6e5813d045..15f578326d0 100644 --- a/pycbc/workflow/pegasus_sites.py +++ b/pycbc/workflow/pegasus_sites.py @@ -21,7 +21,12 @@ from Pegasus.api import Directory, FileServer, Site, Operation, Namespace from Pegasus.api import Arch, OS, SiteCatalog -from pycbc.version import last_release # noqa +from pycbc.version import last_release, version, release # noqa + +if release == 'True': + sing_version = version +else: + sing_version = last_release # NOTE urllib is weird. For some reason it only allows known schemes and will # give *wrong* results, rather then failing, if you use something like gsiftp @@ -218,7 +223,7 @@ def add_osg_site(sitecat, cp): "(HAS_LIGO_FRAMES =?= True) && " "(IS_GLIDEIN =?= True)") cvmfs_loc = '"/cvmfs/singularity.opensciencegrid.org/pycbc/pycbc-el8:v' - cvmfs_loc += last_release + '"' + cvmfs_loc += sing_version + '"' site.add_profiles(Namespace.CONDOR, key="+SingularityImage", value=cvmfs_loc) # On OSG failure rate is high diff --git a/setup.py b/setup.py index 4fa6b1fbb7f..6e414aae033 100755 --- a/setup.py +++ b/setup.py @@ -119,7 +119,7 @@ def __getattr__(self, attr): vinfo = _version_helper.generate_git_version_info() except: vinfo = vdummy() - vinfo.version = '2.4.dev0' + vinfo.version = '2.4.dev1' vinfo.release = 'False' version_script = f"""# coding: utf-8 diff --git a/test/test_coordinates_space.py b/test/test_coordinates_space.py new file mode 100644 index 00000000000..6e4f8883371 --- /dev/null +++ b/test/test_coordinates_space.py @@ -0,0 +1,358 @@ +# Copyright (C) 2023 Shichao Wu, Alex Nitz +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+
+import numpy
+from pycbc.coordinates import space
+import unittest
+from utils import simple_exit
+
+
+seed = 8202
+numpy.random.seed(seed)
+
+# ranges to draw random numbers for each parameter
+RANGES = {
+    "tc_geo" : (1126259462.43, 1426259462.43),
+    "ra" : (0.0, 2 * numpy.pi),
+    "dec" : (-numpy.pi / 2, numpy.pi / 2),
+    "polarization_geo" : (0.0, 2 * numpy.pi),
+    "tc_ssb" : (1126259462.43, 1426259462.43),
+    "eclipticlongitude_ssb" : (0.0, 2 * numpy.pi),
+    "eclipticlatitude_ssb" : (-numpy.pi / 2, numpy.pi / 2),
+    "polarization_ssb" : (0.0, 2 * numpy.pi),
+    "tc_lisa" : (1126259462.43, 1426259462.43),
+    "eclipticlongitude_lisa" : (0.0, 2 * numpy.pi),
+    "eclipticlatitude_lisa" : (-numpy.pi / 2, numpy.pi / 2),
+    "polarization_lisa" : (0.0, 2 * numpy.pi),
+}
+
+def almost_equal(derived_val, check_val, precision=1e-2):
+    """Checks whether the difference between the derived and check values
+    is less than the given precision.
+    """
+    allpass = numpy.allclose(derived_val, check_val, atol=precision)
+    if not allpass:
+        absdiff = abs(derived_val - check_val)
+        maxidx = absdiff.argmax()
+        maxdiff = absdiff[maxidx]
+    else:
+        maxdiff = maxidx = None
+    return allpass, maxdiff, maxidx
+
+def curve_similarity(curve_1, curve_2):
+    """Use the Euclidean distance to check
+    the similarity between two curves.
+    """
+    return numpy.linalg.norm(curve_1 - curve_2)
+
+
+class TestParams(unittest.TestCase):
+    def setUp(self, *args):
+        self.numtests = 1000
+        self.precision = 1e-8
+
+        # generate some random points
+        random_params = {
+            p : numpy.random.uniform(*RANGES[p], size=self.numtests)
+            for p in RANGES.keys()}
+        self.tc_ssb = random_params['tc_ssb']
+        self.eclipticlongitude_ssb = random_params['eclipticlongitude_ssb']
+        self.eclipticlatitude_ssb = random_params['eclipticlatitude_ssb']
+        self.polarization_ssb = random_params['polarization_ssb']
+        self.tc_lisa = random_params['tc_lisa']
+        self.eclipticlongitude_lisa = random_params['eclipticlongitude_lisa']
+        self.eclipticlatitude_lisa = random_params['eclipticlatitude_lisa']
+        self.polarization_lisa = random_params['polarization_lisa']
+        self.tc_geo = random_params['tc_geo']
+        self.ra = random_params['ra']
+        self.dec = random_params['dec']
+        self.polarization_geo = random_params['polarization_geo']
+
+        # calculate derived parameters from each
+
+        self.tc_lisa_derived, self.eclipticlongitude_lisa_derived, \
+            self.eclipticlatitude_lisa_derived, self.polarization_lisa_derived = \
+            space.ssb_to_lisa(
+                self.tc_ssb, self.eclipticlongitude_ssb,
+                self.eclipticlatitude_ssb, self.polarization_ssb)
+
+        self.tc_geo_derived, self.ra_derived, \
+            self.dec_derived, self.polarization_geo_derived = \
+            space.lisa_to_geo(
+                self.tc_lisa, self.eclipticlongitude_lisa,
+                self.eclipticlatitude_lisa, self.polarization_lisa)
+
+        self.tc_ssb_derived, self.eclipticlongitude_ssb_derived, \
+            self.eclipticlatitude_ssb_derived, self.polarization_ssb_derived = \
+            space.geo_to_ssb(
+                self.tc_geo, self.ra,
+                self.dec, self.polarization_geo)
+
+    def test_round_robin(self):
+        """Computes inverse transformations to get original parameters from
+        derived, then compares them to the original.
+        """
+        msg = '{} does not recover same {}; max difference: {}; inputs: {}'
+        # following lists (function to check,
+        #                  arguments to pass to the function,
+        #                  name of self's attribute to compare to)
+        fchecks = [
+            (space.ssb_to_geo,
+             (self.tc_ssb_derived, self.eclipticlongitude_ssb_derived,
+              self.eclipticlatitude_ssb_derived,
+              self.polarization_ssb_derived),
+             ('tc_geo', 'ra',
+              'dec', 'polarization_geo')),
+            (space.geo_to_lisa,
+             (self.tc_geo_derived, self.ra_derived,
+              self.dec_derived, self.polarization_geo_derived),
+             ('tc_lisa', 'eclipticlongitude_lisa',
+              'eclipticlatitude_lisa', 'polarization_lisa')),
+            (space.lisa_to_ssb,
+             (self.tc_lisa_derived, self.eclipticlongitude_lisa_derived,
+              self.eclipticlatitude_lisa_derived,
+              self.polarization_lisa_derived),
+             ('tc_ssb', 'eclipticlongitude_ssb',
+              'eclipticlatitude_ssb', 'polarization_ssb')),
+        ]
+
+        for func, args, compval in fchecks:
+            func_tuple = func(*args)
+            attr_tuple = [getattr(self, i) for i in compval]
+            for i in range(len(func_tuple)):
+                derived_val = func_tuple[i]
+                check_val = attr_tuple[i]
+                passed, maxdiff, maxidx = almost_equal(derived_val, check_val,
+                                                       self.precision)
+                if not passed:
+                    failinputs = [p[maxidx] for p in args]
+                else:
+                    failinputs = None
+                self.assertTrue(passed, msg.format(
+                    func, compval, maxdiff, failinputs))
+
+    def test_polarization(self):
+        """Set up a custom V-shape ground-based detector which is almost
+        co-aligned and co-located with the LISA-Z channel. When the
+        long-wavelength approximation is valid, the detector responses of
+        those two should be very similar. The transform functions in
+        `pycbc.coordinates.space` are used in the calculation.
+        """
+        from pycbc.waveform import get_fd_det_waveform
+        from pycbc.coordinates.space import (ssb_to_lisa, lisa_to_ssb,
+                                             ssb_to_geo, lisa_to_geo)
+
+        from pycbc.types.frequencyseries import FrequencySeries
+        from pycbc.detector import (Detector, load_detector_config,
+                                    add_detector_on_earth,
+                                    _ground_detectors)
+        from pycbc.waveform import get_td_waveform, get_fd_waveform
+        import importlib
+
+        def is_module_installed(module_name):
+            try:
+                importlib.import_module(module_name)
+                return True
+            except ImportError:
+                return False
+
+        def strain_generator(det='D1', model='IMRPhenomD', fs=4096, df=None,
+                             flow=2, fref=2, tc=0, params=None,
+                             fd_taper=False):
+
+            # Generate a waveform in the detector frame.
+            hp, hc = get_td_waveform(approximant=model,
+                                     mass1=params['mass1'], mass2=params['mass2'],
+                                     spin1x=0, spin1y=0,
+                                     spin1z=params['spin1z'], spin2x=0,
+                                     spin2y=0, spin2z=params['spin2z'],
+                                     distance=params['distance'],
+                                     coa_phase=params['coa_phase'],
+                                     inclination=params['inclination'], f_lower=flow,
+                                     f_ref=fref, delta_t=1.0/fs)
+
+            # Set merger time to 'tc'.
+            hp.start_time += tc
+            hc.start_time += tc
+
+            # Project GW waveform onto GW detectors.
+            ra = params['ra']
+            dec = params['dec']
+            psi = params['polarization']
+            time = hp.start_time
+
+            det_1 = Detector("D1")
+            fp_1, fc_1 = det_1.antenna_pattern(
+                right_ascension=ra, declination=dec,
+                polarization=psi, t_gps=tc)
+
+            # Do not take the rotation of the Earth into account.
+            ht_1 = fp_1*hp + fc_1*hc
+            ht_list = [ht_1]
+
+            return ht_list
+
+
+        # Check if bbhx is installed; if not, skip this test
+        # and print a warning.
+        # TODO: we need to implement a better way to install the bbhx package.
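+        # If bbhx is unavailable, the test is treated as passed below
+        # rather than failing, so environments without bbhx still run CI.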
+        bbhx_installed = is_module_installed('bbhx')
+        if not bbhx_installed:
+            passed = True
+            print("Ignore polarization test, because bbhx is not installed.")
+        else:
+            # custom E3
+            # All of those hard-coded numbers are carefully chosen,
+            # in order to almost co-align and co-locate both detectors.
+            fine_tunning = 7992204.094271309
+            OMEGA_0 = 1.99098659277e-7
+            yangle = numpy.pi / 2 + fine_tunning * OMEGA_0
+            add_detector_on_earth(name='D1', longitude=1.8895427761465164,
+                                  latitude=0.11450614784814996, yangle=yangle,
+                                  xangle=yangle+numpy.pi/3, height=0)
+
+            # set parameters
+            params = {}
+            YRSID_SI = 31558149.763545603
+            params['ref_frame'] = 'SSB'
+            params['approximant'] = 'BBHX_PhenomD'
+            params['base_approximant'] = 'BBHX_PhenomD'
+            params['coa_phase'] = 0.0
+            params['mass1'] = 1e6
+            params['mass2'] = 8e5
+            params['spin1z'] = 0.0
+            params['spin2z'] = 0.0
+            params['distance'] = 410
+            params['inclination'] = numpy.pi/2  # edge-on
+            params['t_obs_start'] = 1*YRSID_SI
+            params['delta_f'] = 1./params['t_obs_start']
+            params['f_lower'] = 1e-4
+            params['f_ref'] = 8e-4
+            params['f_final'] = 0.1
+            params['delta_t'] = 1/0.2
+            params['t_offset'] = 9206958.120016199  # 0 degrees
+            t_lisa = YRSID_SI - params['t_offset'] + fine_tunning
+
+            nx = 100
+            longitude_array_high_res = numpy.linspace(
+                0, 2*numpy.pi, num=nx, endpoint=False)
+
+            amp_E3_psi = []
+            phase_E3_psi = []
+            amp_LISA3_psi = []
+            phase_LISA3_psi = []
+
+            for polarization_lisa in numpy.linspace(
+                    0, 2*numpy.pi, endpoint=False, num=nx):
+                params['tc'], params['eclipticlongitude'], \
+                    params['eclipticlatitude'], params['polarization'] = \
+                    lisa_to_ssb(t_lisa, 0, numpy.pi/4,
+                                polarization_lisa, params['t_offset'])
+
+                nparams = {'mass1':params['mass1'], 'mass2':params['mass2'],
+                           'spin1z':params['spin1z'],
+                           'spin2z':params['spin2z'],
+                           'f_lower':params['f_lower']}
+
+                bbhx_fd = get_fd_det_waveform(
+                    ifos=['LISA_A','LISA_E','LISA_T'], **params)
+                lisa_X_fd = -numpy.sqrt(2)/2 * bbhx_fd['LISA_A'] + \
+                    numpy.sqrt(6)/6 * bbhx_fd['LISA_E'] + \
+                    numpy.sqrt(3)/3 * bbhx_fd['LISA_T']
+                lisa_Y_fd = -numpy.sqrt(6)/3 * bbhx_fd['LISA_E'] + \
+                    numpy.sqrt(3)/3 * bbhx_fd['LISA_T']
+                lisa_Z_fd = numpy.sqrt(2)/2 * bbhx_fd['LISA_A'] + \
+                    numpy.sqrt(6)/6 * bbhx_fd['LISA_E'] + \
+                    numpy.sqrt(3)/3 * bbhx_fd['LISA_T']
+                channel_xyz_fd = {'LISA_X':lisa_X_fd,
+                                  'LISA_Y':lisa_Y_fd,
+                                  'LISA_Z':lisa_Z_fd}
+
+                t_geo, ra, dec, polarization_geo = \
+                    ssb_to_geo(params['tc'], params['eclipticlongitude'],
+                               params['eclipticlatitude'],
+                               params['polarization'])
+
+                params_3g = params.copy()
+                params_3g['tc'] = t_geo
+                params_3g['ra'] = ra
+                params_3g['dec'] = dec
+                params_3g['polarization'] = polarization_geo
+                params_3g['coa_phase'] = numpy.pi/2 - params['coa_phase']
+
+                E3_signal = strain_generator(det='D1', model='IMRPhenomD',
+                                             fs=1./params_3g['delta_t'],
+                                             flow=params_3g['f_lower'],
+                                             fref=params_3g['f_ref'],
+                                             tc=params_3g['tc'],
+                                             params=params_3g,
+                                             fd_taper=False)
+                E3_signal_fd = {
+                    'DIY_E3':E3_signal[0].to_frequencyseries(
+                        params_3g['delta_f'])
+                }
+
+                index_E3 = numpy.where(numpy.abs(
+                    E3_signal_fd['DIY_E3'].sample_frequencies -
+                    params_3g['f_ref']) < 1e-7)
+                index_LISA3 = numpy.where(numpy.abs(
+                    bbhx_fd['LISA_A'].sample_frequencies -
+                    params['f_ref']) < 1e-7)
+                amp_E3 = numpy.abs(E3_signal_fd['DIY_E3'])[index_E3[0][0]]
+                amp_LISA3 = numpy.abs(
+                    channel_xyz_fd['LISA_Z'][index_LISA3[0][0]])
+                phase_E3 = numpy.rad2deg(numpy.angle(
+                    E3_signal_fd['DIY_E3'][index_E3[0][0]]))
+                phase_LISA3 = numpy.rad2deg(numpy.angle(
channel_xyz_fd['LISA_Z'][index_LISA3[0][0]]))
+
+                amp_E3_psi.append(amp_E3)
+                phase_E3_psi.append(phase_E3)
+                amp_LISA3_psi.append(amp_LISA3)
+                phase_LISA3_psi.append(phase_LISA3)
+
+            dist_amp = curve_similarity(amp_E3_psi/numpy.mean(amp_E3_psi),
+                                        amp_LISA3_psi/numpy.mean(
+                                            amp_LISA3_psi))
+            dist_phase = curve_similarity(numpy.array(phase_E3_psi),
+                                          numpy.array(phase_LISA3_psi))
+            # Here we compare the amplitude and phase as a function of the
+            # polarization angle in the LISA frame. Because E3 and LISA-Z
+            # are almost co-aligned and co-located, their detector responses
+            # should be very similar when the long-wavelength approximation
+            # holds, so dist_amp and dist_phase should match the reference
+            # values below if the frame transform functions work correctly.
+            # Users can modify this script to plot those curves (amp_E3_psi,
+            # amp_LISA3_psi, phase_E3_psi, phase_LISA3_psi). The hard-coded
+            # values below are reference differences obtained on one
+            # development machine; the curves are not exactly the same,
+            # especially for the phase, but they are almost consistent with
+            # each other visually.
+            if (numpy.abs(dist_amp - 0.2838670151034317) < 1e-2) and \
+               (numpy.abs(dist_phase - 151.77197349820668) < 1e-2):
+                passed = True
+            else:
+                passed = False
+
+        self.assertTrue(passed)
+
+
+suite = unittest.TestSuite()
+suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestParams))
+
+if __name__ == '__main__':
+    results = unittest.TextTestRunner(verbosity=2).run(suite)
+    simple_exit(results)
diff --git a/test/test_transforms.py b/test/test_transforms.py
index c5cd0df63e9..27b628cb13a 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -44,6 +44,12 @@
     "xi1" : (0.0, 1.0),
     "xi2" : (0.0, 1.0),
     "chirp_distance" : (2.0, 10.0),
+    "tc" : (1126259462.43, 1526259462.43),
+    "ra" : (0.0, 2 * numpy.pi),
+    "dec" : (-numpy.pi / 2, numpy.pi / 2),
+    "eclipticlongitude" : (0.0, 2 * numpy.pi),
+    "eclipticlatitude" : (-numpy.pi / 2, numpy.pi / 2),
+    "polarization" : (0.0, 2 * numpy.pi),
 }
 
 # tests only need to happen on the CPU
@@ -96,4 +102,3 @@ def test_inverse(self):
 if __name__ == "__main__":
     results = unittest.TextTestRunner(verbosity=2).run(suite)
     simple_exit(results)
-
diff --git a/tox.ini b/tox.ini
index e41f67686d2..54317f89982 100644
--- a/tox.ini
+++ b/tox.ini
@@ -18,12 +18,27 @@ conda_deps=
     openssl=1.1
     m2crypto
 conda_channels=conda-forge
+platform = lin: linux
+    mac: darwin
 
 # This test should run on almost anybody's environment
 [testenv:py-unittest]
 deps =
     {[base]deps}
     pytest
+    ; Needed for `BBHx` package to work with PyCBC
+    git+https://github.com/mikekatz04/BBHx.git; sys_platform == 'linux'
+    git+https://github.com/gwastro/BBHX-waveform-model.git; sys_platform == 'linux'
+conda_deps=
+    mysqlclient
+    lin: gcc_linux-64>=12.2.0
+    lin: gxx_linux-64>=12.2.0
+    ; mac doesn't work yet and needs a fix
+    ; mac: clang_osx-64
+    ; mac: clangxx_osx-64
+    gsl
+    lapack==3.6.1
+conda_channels=conda-forge
 commands = pytest
 
 # The following are long running or may require
@@ -54,11 +69,14 @@ deps =
     {[base]deps}
     ; Needed for `BBHx` package to work with PyCBC
     git+https://github.com/mikekatz04/BBHx.git; sys_platform == 'linux'
-    git+https://github.com/ConWea/BBHX-waveform-model.git; sys_platform == 'linux'
+    git+https://github.com/gwastro/BBHX-waveform-model.git; sys_platform == 'linux'
 conda_deps=
     mysqlclient
-    gcc_linux-64>=12.2.0
-    gxx_linux-64>=12.2.0
+    lin: gcc_linux-64>=12.2.0
+    lin: gxx_linux-64>=12.2.0
+    ; mac doesn't work yet and needs a fix
+    ; mac: clang_osx-64
+    ; mac: clangxx_osx-64
binutils_linux-64>=2.39 gsl lapack==3.6.1
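Finally, a round-trip sanity check in the spirit of the new test module, exercising the pycbc.coordinates.space conversions directly (arbitrary input values):

    import numpy
    from pycbc.coordinates import space

    tc = numpy.array([1126259462.43])
    lon = numpy.array([1.5])
    lat = numpy.array([0.4])
    pol = numpy.array([2.0])

    # SSB -> LISA -> SSB should recover the original values
    tc_l, lon_l, lat_l, pol_l = space.ssb_to_lisa(tc, lon, lat, pol)
    tc_b, lon_b, lat_b, pol_b = space.lisa_to_ssb(tc_l, lon_l, lat_l, pol_l)
    assert numpy.allclose(tc, tc_b)
    assert numpy.allclose(lon, lon_b)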