diff --git a/lib/galaxy/datatypes/interval.py b/lib/galaxy/datatypes/interval.py
index c681dafb8c1c..9bd5b323b8b7 100644
--- a/lib/galaxy/datatypes/interval.py
+++ b/lib/galaxy/datatypes/interval.py
@@ -284,7 +284,7 @@ def as_ucsc_display_file(self, dataset: DatasetProtocol, **kwd) -> Union[FileObj
             if t >= 0:  # strand column (should) exists
                 for i, elems in enumerate(compression_utils.file_iter(dataset.get_file_name())):
                     strand = "+"
-                    name = "region_%i" % i
+                    name = f"region_{i}"
                     if n >= 0 and n < len(elems):
                         name = cast(str, elems[n])
                     if t < len(elems):
@@ -293,7 +293,7 @@ def as_ucsc_display_file(self, dataset: DatasetProtocol, **kwd) -> Union[FileObj
                     fh.write("{}\n".format("\t".join(tmp)))
             elif n >= 0:  # name column (should) exists
                 for i, elems in enumerate(compression_utils.file_iter(dataset.get_file_name())):
-                    name = "region_%i" % i
+                    name = f"region_{i}"
                     if n >= 0 and n < len(elems):
                         name = cast(str, elems[n])
                     tmp = [elems[c], elems[s], elems[e], name]
@@ -346,8 +346,7 @@ def ucsc_links(self, dataset: DatasetProtocol, type: str, app, base_url: str) ->
                     filename="ucsc_" + site_name,
                 )
                 display_url = quote_plus(
-                    "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
-                    % (
+                    "{}{}/display_as?id={}&display_app={}&authz_method=display_at".format(
                         base_url,
                         app.url_for(controller="root"),
                         dataset.id,
@@ -784,8 +783,7 @@ def _get_remote_call_url(
         internal_url = f"{app.url_for(controller='dataset', dataset_id=dataset.id, action='display_at', filename=f'{type}_{site_name}')}"
         base_url = app.config.get("display_at_callback", base_url)
         display_url = quote_plus(
-            "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
-            % (
+            "{}{}/display_as?id={}&display_app={}&authz_method=display_at".format(
                 base_url,
                 app.url_for(controller="root"),
                 dataset.id,
@@ -1567,8 +1565,7 @@ def ucsc_links(self, dataset: DatasetProtocol, type: str, app, base_url: str) ->
         if site_name in app.datatypes_registry.get_display_sites("ucsc"):
             internal_url = f"{app.url_for(controller='dataset', dataset_id=dataset.id, action='display_at', filename='ucsc_' + site_name)}"
             display_url = quote_plus(
-                "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
-                % (
+                "{}{}/display_as?id={}&display_app={}&authz_method=display_at".format(
                     base_url,
                     app.url_for(controller="root"),
                     dataset.id,
diff --git a/lib/galaxy/datatypes/mothur.py b/lib/galaxy/datatypes/mothur.py
index f025b16805a6..ab030577d233 100644
--- a/lib/galaxy/datatypes/mothur.py
+++ b/lib/galaxy/datatypes/mothur.py
@@ -1085,7 +1085,7 @@ def make_html_table(self, dataset: DatasetProtocol, skipchars: Optional[List] =
             out += "<th>2. Flows</th>"
             for i in range(3, dataset.metadata.columns + 1):
                 base = dataset.metadata.flow_order[(i + 1) % 4]
-                out += "<th>%d. %s</th>" % (i - 2, base)
+                out += f"<th>{i - 2}. {base}</th>"
             out += "</tr>"
             out += self.make_html_peek_rows(dataset, skipchars=skipchars)
             out += "</table>"
diff --git a/lib/galaxy/datatypes/sequence.py b/lib/galaxy/datatypes/sequence.py
index 844934021a78..853e05d1e035 100644
--- a/lib/galaxy/datatypes/sequence.py
+++ b/lib/galaxy/datatypes/sequence.py
@@ -76,7 +76,7 @@ def set_peek(self, dataset: DatasetProtocol, **kwd) -> None:
             parsed_data = json.load(open(dataset.get_file_name()))
             # dataset.peek = json.dumps(data, sort_keys=True, indent=4)
             dataset.peek = data.get_file_peek(dataset.get_file_name())
-            dataset.blurb = "%d sections" % len(parsed_data["sections"])
+            dataset.blurb = "{} sections".format(len(parsed_data["sections"]))
         except Exception:
             dataset.peek = "Not FQTOC file"
             dataset.blurb = "Not FQTOC file"
@@ -465,7 +465,7 @@ def split(cls, input_datasets: List, subdir_generator_function: Callable, split_
             raise Exception("Tool does not define a split mode")
         elif split_params["split_mode"] == "number_of_parts":
             split_size = int(split_params["split_size"])
-            log.debug("Split %s into %i parts..." % (input_file, split_size))
+            log.debug("Split %s into %i parts...", input_file, split_size)
             # if split_mode = number_of_parts, and split_size = 10, and
             # we know the number of sequences (say 1234), then divide by
             # by ten, giving ten files of approx 123 sequences each.
@@ -484,7 +484,7 @@ def split(cls, input_datasets: List, subdir_generator_function: Callable, split_
             # Split the input file into as many sub-files as required,
             # each containing to_size many sequences
             batch_size = int(split_params["split_size"])
-            log.debug("Split %s into batches of %i records..." % (input_file, batch_size))
+            log.debug("Split %s into batches of %i records...", input_file, batch_size)
             cls._count_split(input_file, batch_size, subdir_generator_function)
         else:
             raise Exception(f"Unsupported split mode {split_params['split_mode']}")
@@ -496,7 +496,7 @@ def _size_split(cls, input_file: str, chunk_size: int, subdir_generator_function
         This does of course preserve complete records - it only splits at the
         start of a new FASTQ sequence record.
         """
-        log.debug("Attemping to split FASTA file %s into chunks of %i bytes" % (input_file, chunk_size))
+        log.debug("Attempting to split FASTA file %s into chunks of %i bytes", input_file, chunk_size)
         with open(input_file) as f:
             part_file = None
             try:
@@ -530,7 +530,7 @@ def _size_split(cls, input_file: str, chunk_size: int, subdir_generator_function
     @classmethod
     def _count_split(cls, input_file: str, chunk_size: int, subdir_generator_function: Callable) -> None:
         """Split a FASTA file into chunks based on counting records."""
-        log.debug("Attemping to split FASTA file %s into chunks of %i sequences" % (input_file, chunk_size))
+        log.debug("Attempting to split FASTA file %s into chunks of %i sequences", input_file, chunk_size)
         with open(input_file) as f:
             part_file = None
             try:
diff --git a/lib/galaxy/datatypes/tabular.py b/lib/galaxy/datatypes/tabular.py
index b1467f3fff83..d45a11a0809a 100644
--- a/lib/galaxy/datatypes/tabular.py
+++ b/lib/galaxy/datatypes/tabular.py
@@ -1321,7 +1321,7 @@ def set_meta(
             if line:
                 line_pieces = line.split("\t")
                 if len(line_pieces) != 22:
-                    raise Exception("%s:%d:Corrupt line!" % (dataset.get_file_name(), i))
+                    raise Exception(f"{dataset.get_file_name()}:{i}:Corrupt line!")
                 lanes[line_pieces[2]] = 1
                 tiles[line_pieces[3]] = 1
                 barcodes[line_pieces[6]] = 1
@@ -1353,7 +1353,7 @@ def set_meta(
             "str",
         ]
         dataset.metadata.lanes = list(lanes.keys())
-        dataset.metadata.tiles = ["%04d" % int(t) for t in tiles.keys()]
+        dataset.metadata.tiles = [f"{int(t):04d}" for t in tiles.keys()]
         dataset.metadata.barcodes = [_ for _ in barcodes.keys() if _ != "0"] + [
             "NoIndex" for _ in barcodes.keys() if _ == "0"
         ]
@@ -1477,7 +1477,7 @@ def set_meta(self, dataset: DatasetProtocol, overwrite: bool = True, **kwd) -> N
                 except StopIteration:
                     pass
                 except csv.Error as e:
-                    raise Exception("CSV reader error - line %d: %s" % (reader.line_num, e))
+                    raise Exception(f"CSV reader error - line {reader.line_num}: {e}")
                 else:
                     data_lines = reader.line_num - 1
 
diff --git a/lib/galaxy/datatypes/text.py b/lib/galaxy/datatypes/text.py
index 95fa625aa789..dd6e289fdd4c 100644
--- a/lib/galaxy/datatypes/text.py
+++ b/lib/galaxy/datatypes/text.py
@@ -544,7 +544,7 @@ def set_meta(self, dataset: DatasetProtocol, overwrite: bool = True, **kwd) -> N
                 tax_names = []
                 for entry in json_dict:
                     if "taxonId" in entry:
-                        names = "%d: %s" % (entry["taxonId"], ",".join(entry["speciesNames"]))
+                        names = "{}: {}".format(entry["taxonId"], ",".join(entry["speciesNames"]))
                         tax_names.append(names)
                 dataset.metadata.taxon_names = tax_names
         except Exception:
diff --git a/lib/galaxy/datatypes/util/gff_util.py b/lib/galaxy/datatypes/util/gff_util.py
index cc574831be05..ddfa68d1fe86 100644
--- a/lib/galaxy/datatypes/util/gff_util.py
+++ b/lib/galaxy/datatypes/util/gff_util.py
@@ -56,11 +56,11 @@ def __init__(
         # Handle feature, score column.
         self.feature_col = feature_col
         if self.nfields <= self.feature_col:
-            raise MissingFieldError("No field for feature_col (%d)" % feature_col)
+            raise MissingFieldError(f"No field for feature_col ({feature_col})")
         self.feature = self.fields[self.feature_col]
         self.score_col = score_col
         if self.nfields <= self.score_col:
-            raise MissingFieldError("No field for score_col (%d)" % score_col)
+            raise MissingFieldError(f"No field for score_col ({score_col})")
         self.score = self.fields[self.score_col]
 
         # GFF attributes.
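Editor's note on the datatypes hunks above (illustration only, not part of the patch): the f-string conversions are behavior-preserving for these call sites, since `%i`/`%d` applied to an `int` renders exactly like `{}` in an f-string, and printf-style padding specs carry over unchanged (`"%04d" % n` becomes `f"{n:04d}"`). A quick self-contained check with hypothetical values:

```python
# Mirrors the conversions in interval.py, tabular.py and text.py.
i, t = 7, "42"  # hypothetical region index and tile id
assert "region_%i" % i == f"region_{i}" == "region_7"
assert "%04d" % int(t) == f"{int(t):04d}" == "0042"
assert "%d: %s" % (9606, "human") == "{}: {}".format(9606, "human") == "9606: human"
```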
diff --git a/lib/galaxy/exceptions/error_codes.py b/lib/galaxy/exceptions/error_codes.py
index 31a8de485de5..fe6c3e575177 100644
--- a/lib/galaxy/exceptions/error_codes.py
+++ b/lib/galaxy/exceptions/error_codes.py
@@ -28,7 +28,7 @@ def __str__(self):
 
     def __repr__(self):
        """Return object representation of this error code."""
-        return "ErrorCode[code=%d,message=%s]" % (self.code, str(self.default_error_message))
+        return f"ErrorCode[code={self.code},message={str(self.default_error_message)}]"
 
     def __int__(self):
         """Return the error code integer."""
diff --git a/lib/galaxy/job_metrics/instrumenters/core.py b/lib/galaxy/job_metrics/instrumenters/core.py
index 76cd4053ffd2..c7f95af2e7a7 100644
--- a/lib/galaxy/job_metrics/instrumenters/core.py
+++ b/lib/galaxy/job_metrics/instrumenters/core.py
@@ -36,9 +36,9 @@ def format(self, key: str, value: Any) -> FormattedMetric:
             return FormattedMetric("Container Type", value)
         value = int(value)
         if key == GALAXY_SLOTS_KEY:
-            return FormattedMetric("Cores Allocated", "%d" % value)
+            return FormattedMetric("Cores Allocated", f"{value}")
         elif key == GALAXY_MEMORY_MB_KEY:
-            return FormattedMetric("Memory Allocated (MB)", "%d" % value)
+            return FormattedMetric("Memory Allocated (MB)", f"{value}")
         elif key == RUNTIME_SECONDS_KEY:
             return FormattedMetric("Job Runtime (Wall Clock)", seconds_to_str(value))
         else:
diff --git a/lib/galaxy/jobs/__init__.py b/lib/galaxy/jobs/__init__.py
index 9e4d3b65376c..c26aa11dbc0a 100644
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -2768,8 +2768,10 @@ def finish(self, stdout, stderr, tool_exit_code=None, **kwds):
 
         # This may have ended too soon
         log.debug(
-            "task %s for job %d ended; exit code: %d"
-            % (self.task_id, self.job_id, tool_exit_code if tool_exit_code is not None else -256)
+            "task %s for job %d ended; exit code: %d",
+            self.task_id,
+            self.job_id,
+            tool_exit_code if tool_exit_code is not None else -256,
         )
         # default post job setup_external_metadata
         task = self.get_task()
diff --git a/lib/galaxy/jobs/handler.py b/lib/galaxy/jobs/handler.py
index 7ec78bef89c2..ef2fb7c98cef 100644
--- a/lib/galaxy/jobs/handler.py
+++ b/lib/galaxy/jobs/handler.py
@@ -552,22 +552,22 @@ def __handle_waiting_jobs(self):
                 if job_state == JOB_WAIT:
                     new_waiting_jobs.append(job.id)
                 elif job_state == JOB_INPUT_ERROR:
-                    log.info("(%d) Job unable to run: one or more inputs in error state" % job.id)
+                    log.info("(%d) Job unable to run: one or more inputs in error state", job.id)
                 elif job_state == JOB_INPUT_DELETED:
-                    log.info("(%d) Job unable to run: one or more inputs deleted" % job.id)
+                    log.info("(%d) Job unable to run: one or more inputs deleted", job.id)
                 elif job_state == JOB_READY:
                     self.dispatcher.put(self.job_wrappers.pop(job.id))
-                    log.info("(%d) Job dispatched" % job.id)
+                    log.info("(%d) Job dispatched", job.id)
                 elif job_state == JOB_DELETED:
-                    log.info("(%d) Job deleted by user while still queued" % job.id)
+                    log.info("(%d) Job deleted by user while still queued", job.id)
                 elif job_state == JOB_ADMIN_DELETED:
-                    log.info("(%d) Job deleted by admin while still queued" % job.id)
+                    log.info("(%d) Job deleted by admin while still queued", job.id)
                 elif job_state in (JOB_USER_OVER_QUOTA, JOB_USER_OVER_TOTAL_WALLTIME):
                     if job_state == JOB_USER_OVER_QUOTA:
-                        log.info("(%d) User (%s) is over quota: job paused" % (job.id, job.user_id))
+                        log.info("(%d) User (%s) is over quota: job paused", job.id, job.user_id)
                         what = "your disk quota"
                     else:
-                        log.info("(%d) User (%s) is over total walltime limit: job paused" % (job.id, job.user_id))
+                        log.info("(%d) User (%s) is over total walltime limit: job paused", job.id, job.user_id)
                         what = "your total job runtime"
 
                     job.set_state(model.Job.states.PAUSED)
@@ -580,7 +580,7 @@ def __handle_waiting_jobs(self):
                     # A more informative message is shown wherever the job state is set to error
                     pass
                 else:
-                    log.error("(%d) Job in unknown state '%s'" % (job.id, job_state))
+                    log.error("(%d) Job in unknown state '%s'", job.id, job_state)
                     new_waiting_jobs.append(job.id)
             except Exception:
                 log.exception("failure running job %d", job.id)
@@ -1277,7 +1277,7 @@ def stop(self, job, job_wrapper):
 
     def recover(self, job, job_wrapper):
         runner_name = (job.job_runner_name.split(":", 1))[0]
-        log.debug("recovering job %d in %s runner" % (job.id, runner_name))
+        log.debug("recovering job %d in %s runner", job.id, runner_name)
         runner = self.get_job_runner(job_wrapper)
         try:
             runner.recover(job, job_wrapper)
diff --git a/lib/galaxy/jobs/runners/__init__.py b/lib/galaxy/jobs/runners/__init__.py
index 07a3faccd263..e169e0b658b9 100644
--- a/lib/galaxy/jobs/runners/__init__.py
+++ b/lib/galaxy/jobs/runners/__init__.py
@@ -129,7 +129,7 @@ def _init_worker_threads(self):
         self.work_threads = []
         log.debug(f"Starting {self.nworkers} {self.runner_name} workers")
         for i in range(self.nworkers):
-            worker = threading.Thread(name="%s.work_thread-%d" % (self.runner_name, i), target=self.run_next)
+            worker = threading.Thread(name=f"{self.runner_name}.work_thread-{i}", target=self.run_next)
             worker.daemon = True
             worker.start()
             self.work_threads.append(worker)
@@ -493,7 +493,7 @@ def _handle_metadata_externally(self, job_wrapper: "MinimalJobWrapper", resolve_
                 env=os.environ,
                 preexec_fn=os.setpgrp,
             )
-            log.debug("execution of external set_meta for job %d finished" % job_wrapper.job_id)
+            log.debug("execution of external set_meta for job %d finished", job_wrapper.job_id)
 
     def get_job_file(self, job_wrapper: "MinimalJobWrapper", **kwds) -> str:
         job_metrics = job_wrapper.app.job_metrics
diff --git a/lib/galaxy/jobs/runners/aws.py b/lib/galaxy/jobs/runners/aws.py
index 72b078207591..922b0aa4b522 100644
--- a/lib/galaxy/jobs/runners/aws.py
+++ b/lib/galaxy/jobs/runners/aws.py
@@ -341,7 +341,7 @@ def _register_job_definition(self, jd_name, container_image, destination_params)
            "environment": _add_galaxy_environment_variables(
                destination_params.get("vcpu"), destination_params.get("memory"),
            ),
-            "user": "%d:%d" % (os.getuid(), os.getgid()),
+            "user": f"{os.getuid()}:{os.getgid()}",
            "privileged": destination_params.get("privileged"),
            "logConfiguration": {"logDriver": "awslogs"},
        }
diff --git a/lib/galaxy/jobs/runners/local.py b/lib/galaxy/jobs/runners/local.py
index eb1853eb01bf..1b05e594d788 100644
--- a/lib/galaxy/jobs/runners/local.py
+++ b/lib/galaxy/jobs/runners/local.py
@@ -171,7 +171,7 @@ def stop_job(self, job_wrapper):
             return
         pid = int(pid)
         if not check_pg(pid):
-            log.warning("stop_job(): %s: Process group %d was already dead or can't be signaled" % (job.id, pid))
+            log.warning("stop_job(): %s: Process group %d was already dead or can't be signaled", job.id, pid)
             return
         log.debug("stop_job(): %s: Terminating process group %d", job.id, pid)
         kill_pg(pid)
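Editor's note (hedged aside, not part of the patch): swapping `"%d" % value` for a bare `f"{value}"`, as in core.py above, is only a drop-in replacement when the value is already an `int` — `%d` truncates floats and rejects non-numbers, while an f-string happily stringifies anything. The patch is safe there because `value = int(value)` runs just before the changed lines. A small check with hypothetical values:

```python
value = 3.7  # hypothetical metric value
assert "%d" % value == "3"     # printf-style coerces the float to an integer
assert f"{value}" == "3.7"     # a bare f-string does not
assert f"{int(value)}" == "3"  # the int() guard in core.py restores equivalence
```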
diff --git a/lib/galaxy/jobs/runners/pbs.py b/lib/galaxy/jobs/runners/pbs.py
index 4ab56c460ff5..20d3929ae5dd 100644
--- a/lib/galaxy/jobs/runners/pbs.py
+++ b/lib/galaxy/jobs/runners/pbs.py
@@ -317,7 +317,7 @@ def queue_job(self, job_wrapper):
                 pbs.pbs_disconnect(c)
                 break
             errno, text = pbs.error()
-            log.warning("(%s) pbs_submit failed (try %d/5), PBS error %d: %s" % (galaxy_job_id, tries, errno, text))
+            log.warning("(%s) pbs_submit failed (try %d/5), PBS error %d: %s", galaxy_job_id, tries, errno, text)
             time.sleep(2)
         else:
             log.error(f"({galaxy_job_id}) All attempts to submit job failed")
@@ -386,7 +386,7 @@ def check_watched_items(self):
                 else:
                     # Unhandled error, continue to monitor
                     log.info(
-                        "(%s/%s) PBS state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text)
+                        "(%s/%s) PBS state check resulted in error (%d): %s", galaxy_job_id, job_id, errno, text
                     )
                     new_watched.append(pbs_job_state)
                     continue
diff --git a/lib/galaxy/jobs/runners/pulsar.py b/lib/galaxy/jobs/runners/pulsar.py
index a2bb8ff6c76b..51628d29b229 100644
--- a/lib/galaxy/jobs/runners/pulsar.py
+++ b/lib/galaxy/jobs/runners/pulsar.py
@@ -723,11 +723,13 @@ def check_pid(self, pid):
            return True
        except OSError as e:
            if e.errno == errno.ESRCH:
-                log.debug("check_pid(): PID %d is dead" % pid)
+                log.debug("check_pid(): PID %d is dead", pid)
            else:
                log.warning(
-                    "check_pid(): Got errno %s when attempting to check PID %d: %s"
-                    % (errno.errorcode[e.errno], pid, e.strerror)
+                    "check_pid(): Got errno %s when attempting to check PID %d: %s",
+                    errno.errorcode[e.errno],
+                    pid,
+                    e.strerror,
                )
            return False
 
@@ -747,23 +749,27 @@ def stop_job(self, job_wrapper):
                return
            pid = int(pid)
            if not self.check_pid(pid):
-                log.warning("stop_job(): %s: PID %d was already dead or can't be signaled" % (job.id, pid))
+                log.warning("stop_job(): %s: PID %d was already dead or can't be signaled", job.id, pid)
                return
            for sig in [15, 9]:
                try:
                    os.killpg(pid, sig)
                except OSError as e:
                    log.warning(
-                        "stop_job(): %s: Got errno %s when attempting to signal %d to PID %d: %s"
-                        % (job.id, errno.errorcode[e.errno], sig, pid, e.strerror)
+                        "stop_job(): %s: Got errno %s when attempting to signal %d to PID %d: %s",
+                        job.id,
+                        errno.errorcode[e.errno],
+                        sig,
+                        pid,
+                        e.strerror,
                    )
                    return  # give up
                sleep(2)
                if not self.check_pid(pid):
-                    log.debug("stop_job(): %s: PID %d successfully killed with signal %d" % (job.id, pid, sig))
+                    log.debug("stop_job(): %s: PID %d successfully killed with signal %d", job.id, pid, sig)
                    return
            else:
-                log.warning("stop_job(): %s: PID %d refuses to die after signaling TERM/KILL" % (job.id, pid))
+                log.warning("stop_job(): %s: PID %d refuses to die after signaling TERM/KILL", job.id, pid)
        else:
            # Remote kill
            pulsar_url = job.job_runner_name
diff --git a/lib/galaxy/jobs/runners/tasks.py b/lib/galaxy/jobs/runners/tasks.py
index 70088bb08e8d..664880acfdf4 100644
--- a/lib/galaxy/jobs/runners/tasks.py
+++ b/lib/galaxy/jobs/runners/tasks.py
@@ -100,7 +100,7 @@ def queue_job(self, job_wrapper):
                    task_state = tw.get_state()
                    if model.Task.states.ERROR == task_state:
                        job_exit_code = tw.get_exit_code()
-                        log.debug("Canceling job %d: Task %s returned an error" % (tw.job_id, tw.task_id))
+                        log.debug("Canceling job %d: Task %s returned an error", tw.job_id, tw.task_id)
                        self._cancel_job(job_wrapper, task_wrappers)
                        tasks_complete = True
                        break
@@ -196,9 +196,7 @@ def _cancel_job(self, job_wrapper, task_wrappers):
            task = task_wrapper.get_task()
            task_state = task.get_state()
            if model.Task.states.QUEUED == task_state:
-                log.debug(
-                    "_cancel_job for job %d: Task %d is not running; setting state to DELETED" % (job.id, task.id)
-                )
+                log.debug("_cancel_job for job %d: Task %d is not running; setting state to DELETED", job.id, task.id)
                task_wrapper.change_state(task.states.DELETED)
        # If a task failed, then the caller will have waited a few seconds
        # before recognizing the failure. In that time, a queued task could
@@ -209,7 +207,7 @@ def _cancel_job(self, job_wrapper, task_wrappers):
        for task_wrapper in task_wrappers:
            if model.Task.states.RUNNING == task_wrapper.get_state():
                task = task_wrapper.get_task()
-                log.debug("_cancel_job for job %d: Stopping running task %d" % (job.id, task.id))
+                log.debug("_cancel_job for job %d: Stopping running task %d", job.id, task.id)
                job_wrapper.app.job_manager.job_handler.dispatcher.stop(task)
 
    def _check_pid(self, pid):
@@ -226,7 +224,7 @@ def _stop_pid(self, pid, job_id):
        pid = int(pid)
        log.debug(f"Stopping pid {pid}")
        if not self._check_pid(pid):
-            log.warning("_stop_pid(): %s: PID %d was already dead or can't be signaled" % (job_id, pid))
+            log.warning("_stop_pid(): %s: PID %d was already dead or can't be signaled", job_id, pid)
            return
        for sig in [15, 9]:
            try:
@@ -235,15 +233,19 @@ def _stop_pid(self, pid, job_id):
                # This warning could be bogus; many tasks are stopped with
                # SIGTERM (signal 15), but ymmv depending on the platform.
                log.warning(
-                    "_stop_pid(): %s: Got errno %s when attempting to signal %d to PID %d: %s"
-                    % (job_id, errno.errorcode[e.errno], sig, pid, e.strerror)
+                    "_stop_pid(): %s: Got errno %s when attempting to signal %d to PID %d: %s",
+                    job_id,
+                    errno.errorcode[e.errno],
+                    sig,
+                    pid,
+                    e.strerror,
                )
                return
            # TODO: If we're stopping lots of tasks, then we will want to put this
            # avoid a two-second overhead using some other asynchronous method.
            sleep(2)
            if not self._check_pid(pid):
-                log.debug("_stop_pid(): %s: PID %d successfully killed with signal %d" % (job_id, pid, sig))
+                log.debug("_stop_pid(): %s: PID %d successfully killed with signal %d", job_id, pid, sig)
                return
        else:
-            log.warning("_stop_pid(): %s: PID %d refuses to die after signaling TERM/KILL" % (job_id, pid))
+            log.warning("_stop_pid(): %s: PID %d refuses to die after signaling TERM/KILL", job_id, pid)
diff --git a/lib/galaxy/jobs/splitters/multi.py b/lib/galaxy/jobs/splitters/multi.py
index afc5d289b17d..ea0ce27a1753 100644
--- a/lib/galaxy/jobs/splitters/multi.py
+++ b/lib/galaxy/jobs/splitters/multi.py
@@ -38,7 +38,7 @@ def do_split(job_wrapper):
    task_dirs = []
 
    def get_new_working_directory_name():
-        dir = os.path.join(working_directory, "task_%d" % subdir_index[0])
+        dir = os.path.join(working_directory, f"task_{subdir_index[0]}")
        subdir_index[0] = subdir_index[0] + 1
        if not os.path.exists(dir):
            os.makedirs(dir)
@@ -93,7 +93,7 @@ def get_new_working_directory_name():
            log_error = f"The type '{str(input_type)}' does not define a method for splitting files"
            log.error(log_error)
            raise
-    log.debug("do_split created %d parts" % len(task_dirs))
+    log.debug("do_split created %d parts", len(task_dirs))
    # next, after we know how many divisions there are, add the shared inputs via soft links
    for input in parent_job.input_datasets:
        if input and input.name in shared_inputs:
@@ -154,13 +154,15 @@ def do_merge(job_wrapper, task_wrappers):
                log.debug(f"files {output_files} ")
                if len(output_files) < len(task_dirs):
                    log.debug(
-                        "merging only %i out of expected %i files for %s"
-                        % (len(output_files), len(task_dirs), output_file_name)
+                        "merging only %i out of expected %i files for %s",
+                        len(output_files),
+                        len(task_dirs),
+                        output_file_name,
                    )
                output_type.merge(output_files, output_file_name)
                log.debug(f"merge finished: {output_file_name}")
            else:
-                msg = "nothing to merge for %s (expected %i files)" % (output_file_name, len(task_dirs))
+                msg = f"nothing to merge for {output_file_name} (expected {len(task_dirs)} files)"
                log.debug(msg)
                stderr += f"{msg}\n"
        elif output in pickone_outputs:
diff --git a/lib/galaxy/managers/collections.py b/lib/galaxy/managers/collections.py
index a809f0214289..fae6d0563347 100644
--- a/lib/galaxy/managers/collections.py
+++ b/lib/galaxy/managers/collections.py
@@ -249,7 +249,7 @@ def _create_instance_for_collection(
        if implicit_output_name:
            dataset_collection_instance.implicit_output_name = implicit_output_name
 
-        log.debug("Created collection with %d elements" % (len(dataset_collection_instance.collection.elements)))
+        log.debug("Created collection with %d elements", len(dataset_collection_instance.collection.elements))
 
        if set_hid:
            parent.add_dataset_collection(dataset_collection_instance)
diff --git a/lib/galaxy/managers/collections_util.py b/lib/galaxy/managers/collections_util.py
index d0ca89f61626..32feab23889d 100644
--- a/lib/galaxy/managers/collections_util.py
+++ b/lib/galaxy/managers/collections_util.py
@@ -47,7 +47,7 @@ def validate_input_element_identifiers(element_identifiers):
    """Scan through the list of element identifiers supplied by the API consumer
    and verify the structure is valid.
    """
-    log.debug("Validating %d element identifiers for collection creation." % len(element_identifiers))
+    log.debug("Validating %d element identifiers for collection creation.", len(element_identifiers))
    identifier_names = set()
    for element_identifier in element_identifiers:
        if "__object__" in element_identifier:
diff --git a/lib/galaxy/managers/display_applications.py b/lib/galaxy/managers/display_applications.py
index 0a64ef85cc74..3c8ceb9b122f 100644
--- a/lib/galaxy/managers/display_applications.py
+++ b/lib/galaxy/managers/display_applications.py
@@ -72,17 +72,18 @@ def reload(self, ids: List[str]) -> ReloadFeedback:
        )
        reloaded, failed = self.datatypes_registry.reload_display_applications(ids)
        if not reloaded and failed:
-            message = 'Unable to reload any of the %i requested display applications ("%s").' % (
+            message = 'Unable to reload any of the {} requested display applications ("{}").'.format(
                len(failed),
                '", "'.join(failed),
            )
        elif failed:
            message = (
-                'Reloaded %i display applications ("%s"), but failed to reload %i display applications ("%s").'
-                % (len(reloaded), '", "'.join(reloaded), len(failed), '", "'.join(failed))
+                'Reloaded {} display applications ("{}"), but failed to reload {} display applications ("{}").'.format(
+                    len(reloaded), '", "'.join(reloaded), len(failed), '", "'.join(failed)
+                )
            )
        elif not reloaded:
            message = "You need to request at least one display application to reload."
        else:
-            message = 'Reloaded %i requested display applications ("%s").' % (len(reloaded), '", "'.join(reloaded))
+            message = 'Reloaded {} requested display applications ("{}").'.format(len(reloaded), '", "'.join(reloaded))
 
        return ReloadFeedback(message=message, reloaded=reloaded, failed=failed)
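Editor's note on the logging rewrites throughout the jobs and managers modules (illustration only, not part of the patch): the patch deliberately passes arguments to the logger instead of pre-formatting with `%` or an f-string. The logger interpolates the template only when a handler actually emits the record, so suppressed DEBUG/INFO lines cost almost nothing, and log aggregators can still group messages by their unchanging template. A minimal sketch with hypothetical names and values:

```python
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
job_id, pid = 42, 31337  # hypothetical ids

# Deferred: the template and args are stored on the record; formatting
# happens only if a handler accepts the DEBUG level (here it does not).
log.debug("stop_job(): %s: Terminating process group %d", job_id, pid)

# Eager: the f-string builds the full message even though it is discarded.
log.debug(f"stop_job(): {job_id}: Terminating process group {pid}")
```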
diff --git a/lib/galaxy/managers/markdown_parse.py b/lib/galaxy/managers/markdown_parse.py
index 7eeef2ba482d..7f3110da3cd4 100644
--- a/lib/galaxy/managers/markdown_parse.py
+++ b/lib/galaxy/managers/markdown_parse.py
@@ -86,12 +86,12 @@ class DynamicArguments:
 def validate_galaxy_markdown(galaxy_markdown, internal=True):
    """Validate the supplied markdown and throw an ValueError with reason if invalid."""
 
-    def invalid_line(template, line_no, **kwd):
+    def invalid_line(template, line_no: int, **kwd):
        if "line" in kwd:
            kwd["line"] = kwd["line"].rstrip("\r\n")
-        raise ValueError("Invalid line %d: %s" % (line_no + 1, template.format(**kwd)))
+        raise ValueError(f"Invalid line {line_no + 1}: {template.format(**kwd)}")
 
-    def _validate_arg(arg_str, valid_args, line_no):
+    def _validate_arg(arg_str, valid_args, line_no: int):
        if arg_str is not None:
            arg_name = arg_str.split("=", 1)[0].strip()
            if arg_name not in valid_args and arg_name not in SHARED_ARGUMENTS:
diff --git a/lib/galaxy/managers/markdown_util.py b/lib/galaxy/managers/markdown_util.py
index 02545399e997..ba32b6489624 100644
--- a/lib/galaxy/managers/markdown_util.py
+++ b/lib/galaxy/managers/markdown_util.py
@@ -94,7 +94,7 @@ def _remap(container, line):
        if id_match := re.search(ENCODED_ID_PATTERN, line):
            object_id = id_match.group(2)
            decoded_id = trans.security.decode_id(object_id)
-            line = line.replace(id_match.group(), "%s=%d" % (id_match.group(1), decoded_id))
+            line = line.replace(id_match.group(), f"{id_match.group(1)}={decoded_id}")
        return (line, False)
 
    internal_markdown = _remap_galaxy_markdown_calls(_remap, external_galaxy_markdown)
@@ -594,7 +594,7 @@ def handle_workflow_display(self, line, stored_workflow, workflow_version: Optio
        workflow = stored_workflow.get_internal_version(workflow_version)
        for order_index, step in enumerate(workflow.steps):
            annotation = get_item_annotation_str(self.trans.sa_session, self.trans.user, step) or ""
-            markdown += "|{}|{}|\n".format(step.label or "Step %d" % (order_index + 1), annotation)
+            markdown += "|{}|{}|\n".format(step.label or f"Step {order_index + 1}", annotation)
        markdown += "\n---\n"
        return (markdown, True)
 
diff --git a/lib/galaxy/managers/sharable.py b/lib/galaxy/managers/sharable.py
index cfdcda372e97..1d6c447cd59b 100644
--- a/lib/galaxy/managers/sharable.py
+++ b/lib/galaxy/managers/sharable.py
@@ -335,7 +335,7 @@ def get_unique_slug(self, item):
        while importable_item_slug_exists(self.session(), item.__class__, item.user, new_slug):
            # Slug taken; choose a new slug based on count. This approach can
            # handle numerous items with the same name gracefully.
-            new_slug = "%s-%i" % (slug_base, count)
+            new_slug = f"{slug_base}-{count}"
            count += 1
        return new_slug
 
diff --git a/lib/galaxy/managers/workflows.py b/lib/galaxy/managers/workflows.py
index 20354c029198..dc00e872b75b 100644
--- a/lib/galaxy/managers/workflows.py
+++ b/lib/galaxy/managers/workflows.py
@@ -738,9 +738,10 @@ def update_workflow_from_raw_description(
        )
 
        if missing_tool_tups and not workflow_update_options.allow_missing_tools:
-            errors = []
-            for missing_tool_tup in missing_tool_tups:
-                errors.append("Step %i: Requires tool '%s'." % (int(missing_tool_tup[3]) + 1, missing_tool_tup[0]))
+            errors = [
+                f"Step {int(missing_tool_tup[3]) + 1}: Requires tool '{missing_tool_tup[0]}'."
+                for missing_tool_tup in missing_tool_tups
+            ]
            raise MissingToolsException(workflow, errors)
 
        # Connect up
@@ -1107,7 +1108,7 @@ def row_for_param(input_dict, param, raw_value, other_values, prefix, step):
                if not isinstance(conns, list):
                    conns = [conns]
                value_list = [
-                    "Output '%s' from Step %d." % (conn.output_name, int(conn.output_step.order_index) + 1)
+                    f"Output '{conn.output_name}' from Step {int(conn.output_step.order_index) + 1}."
                    for conn in conns
                ]
                value = ",".join(value_list)
@@ -1132,7 +1133,7 @@ def do_inputs(inputs, values, prefix, step, other_values=None):
                for i in range(len(repeat_values)):
                    nested_input_dict = {}
                    index = repeat_values[i]["__index__"]
-                    nested_input_dict["title"] = "%i. %s" % (i + 1, input.title)
+                    nested_input_dict["title"] = f"{i + 1}. {input.title}"
                    try:
                        nested_input_dict["inputs"] = do_inputs(
                            input.inputs,
diff --git a/lib/galaxy/model/__init__.py b/lib/galaxy/model/__init__.py
index 4dc18f87b1c9..e39c76e5d9cc 100644
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -1171,7 +1171,7 @@ def _calculate_or_set_disk_usage(self, object_store):
            sa_session.commit()
 
    @staticmethod
-    def user_template_environment(user):
+    def user_template_environment(user: Optional["User"]):
        """
 
        >>> env = User.user_template_environment(None)
@@ -1189,7 +1189,7 @@ def user_template_environment(user):
        'foo2'
        """
        if user:
-            user_id = "%d" % user.id
+            user_id = f"{user.id}"
            user_email = str(user.email)
            user_name = str(user.username)
        else:
@@ -1197,7 +1197,7 @@ def user_template_environment(user):
            user_id = "Anonymous"
            user_email = "Anonymous"
            user_name = "Anonymous"
-        environment = {}
+        environment: Dict[str, Any] = {}
        environment["__user__"] = user
        environment["__user_id__"] = environment["userId"] = user_id
        environment["__user_email__"] = environment["userEmail"] = user_email
@@ -1205,7 +1205,7 @@ def user_template_environment(user):
        return environment
 
    @staticmethod
-    def expand_user_properties(user, in_string):
+    def expand_user_properties(user, in_string: str):
        """ """
        environment = User.user_template_environment(user)
        return Template(in_string).safe_substitute(environment)
@@ -2381,7 +2381,7 @@ def set_job_runner_external_id(self, task_runner_external_id):
        # This method is available for runners that do not want/need to
        # differentiate between the kinds of Runnable things (Jobs and Tasks)
        # that they're using.
-        log.debug("Task %d: Set external id to %s" % (self.id, task_runner_external_id))
+        log.debug("Task %d: Set external id to %s", self.id, task_runner_external_id)
        self.task_runner_external_id = task_runner_external_id
 
    def set_task_runner_external_id(self, task_runner_external_id):
@@ -8069,7 +8069,7 @@ def log_str(self):
        extra = ""
        if self.stored_workflow:
            extra = f",name={self.stored_workflow.name}"
-        return "Workflow[id=%d%s]" % (self.id, extra)
+        return f"Workflow[id={self.id}{extra}]"
 
 
 InputConnDictType = Dict[str, Union[Dict[str, Any], List[Dict[str, Any]]]]
@@ -9959,7 +9959,7 @@ def get_file_name(self, sync_cache: bool = True) -> str:
                if e.errno != errno.EEXIST:
                    raise
        # Return filename inside hashed directory
-        return os.path.abspath(os.path.join(path, "metadata_%d.dat" % self.id))
+        return os.path.abspath(os.path.join(path, f"metadata_{self.id}.dat"))
 
    def _serialize(self, id_encoder, serialization_options):
        as_dict = dict_for(self)
@@ -10744,7 +10744,7 @@ class Tag(Base, RepresentById):
    parent: Mapped[Optional["Tag"]] = relationship(back_populates="children", remote_side=[id])
 
    def __str__(self):
-        return "Tag(id=%s, type=%i, parent_id=%s, name=%s)" % (self.id, self.type or -1, self.parent_id, self.name)
+        return f"Tag(id={self.id}, type={self.type or -1}, parent_id={self.parent_id}, name={self.name})"
 
 
 class ItemTagAssociation(Dictifiable):
diff --git a/lib/galaxy/model/orm/engine_factory.py b/lib/galaxy/model/orm/engine_factory.py
index 3effa9ed8e00..877e39ca69f1 100644
--- a/lib/galaxy/model/orm/engine_factory.py
+++ b/lib/galaxy/model/orm/engine_factory.py
@@ -43,10 +43,7 @@ def stripwd(s):
 
 
 def pretty_stack():
-    rval = []
-    for _, fname, line, funcname, _, _ in inspect.stack()[2:]:
-        rval.append("%s:%s@%d" % (stripwd(fname), funcname, line))
-    return rval
+    return [f"{stripwd(fname)}:{funcname}@{line}" for _, fname, line, funcname, _, _ in inspect.stack()[2:]]
 
 
 def build_engine(
diff --git a/tox.ini b/tox.ini
index bf7909a66f1c..ab1153917d2e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -46,6 +46,8 @@ setenv =
    first_startup: GALAXY_CONFIG_DATABASE_AUTO_MIGRATE=true
    mulled: marker=external_dependency_management
    mulled,unit: GALAXY_VIRTUAL_ENV={envdir}
+    mypy,test_galaxy_packages,test_galaxy_packages_for_pulsar: PIP_INDEX_URL=https://wheels.galaxyproject.org/simple
+    mypy,test_galaxy_packages,test_galaxy_packages_for_pulsar: PIP_EXTRA_INDEX_URL=https://pypi.python.org/simple
    unit: GALAXY_ENABLE_BETA_COMPRESSED_GENBANK_SNIFFING=1
    unit: marker=not external_dependency_management
 deps =