Minor bug fixes
coreyjadams committed Apr 7, 2022
1 parent 077cddf · commit ccad2eb
Showing 6 changed files with 11 additions and 351 deletions.
5 changes: 0 additions & 5 deletions src/config/SCC_21.yaml
@@ -5,8 +5,3 @@ defaults:
   - _self_
 data:
   downsample: 0
-mode:
-  optimizer:
-    loss_balance_scheme: light
-
-

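The deleted keys were per-config overrides in a Hydra-style file (the defaults/_self_ entries suggest Hydra composition); with them removed, mode.optimizer.loss_balance_scheme falls back to whatever the composed defaults provide. A minimal sketch of that merge behavior, assuming OmegaConf-style composition as Hydra uses; the base values below are placeholders, not the repository's actual defaults:

from omegaconf import OmegaConf

# Placeholder base config standing in for the composed defaults.
base = OmegaConf.create({
    "mode": {"optimizer": {"loss_balance_scheme": "focal"}},
    "data": {"downsample": 1},
})

# After this commit, SCC_21.yaml only overrides the data block.
scc_21 = OmegaConf.create({"data": {"downsample": 0}})

merged = OmegaConf.merge(base, scc_21)
print(merged.mode.optimizer.loss_balance_scheme)  # base value survives
print(merged.data.downsample)                     # 0, taken from SCC_21.yaml
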
85 changes: 0 additions & 85 deletions src/utils/core/larcvio/io_templates.py

This file was deleted.

8 changes: 4 additions & 4 deletions src/utils/core/larcvio/larcv_fetcher.py
@@ -9,7 +9,6 @@
 import logging
 logger = logging.getLogger("cosmictagger")

-from larcv.config_builder import ConfigBuilder

 class larcv_fetcher(object):

@@ -95,6 +94,7 @@ def prepare_cosmic_sample(self, name, input_file, batch_size, color=None):
             raise Exception(f"File {input_file} not found")


+        from larcv.config_builder import ConfigBuilder
         cb = ConfigBuilder()
         cb.set_parameter([str(input_file)], "InputFiles")
         cb.set_parameter(5, "ProcessDriver", "IOManager", "Verbosity")
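
Moving the ConfigBuilder import from module scope into prepare_cosmic_sample is a deferred-import pattern: the module can then be imported (for example, for unit tests or tooling) on machines where larcv is not installed, and the dependency is only required once data is actually prepared. A minimal, self-contained sketch of the pattern; the class and method names here are illustrative, not the repository's full implementation:

class Fetcher:

    def prepare_sample(self, input_file):
        # Deferred import: larcv is only needed when a sample is prepared,
        # so importing this module does not require larcv at all.
        from larcv.config_builder import ConfigBuilder

        cb = ConfigBuilder()
        cb.set_parameter([str(input_file)], "InputFiles")
        return cb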
@@ -159,9 +159,9 @@ def prepare_cosmic_sample(self, name, input_file, batch_size, color=None):

         self._larcv_interface.prepare_manager(name, io_config, batch_size, data_keys, color=color)

-
-        if self.mode == "inference":
-            self._larcv_interface.set_next_index(name, start_index)
+        #
+        # if self.mode == "inference":
+        #     self._larcv_interface.set_next_index(name, start_index)

         # This queues up the next data
         # self._larcv_interface.prepare_next(name)
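
The inference-only call to set_next_index is commented out here rather than deleted. If it ever needs to be re-enabled selectively, one defensive option (an assumption, not code from this repository) is to gate it on both the run mode and the interface actually exposing the method:

def maybe_set_start_index(interface, name, mode, start_index):
    # Hypothetical helper: only seek to a starting entry for inference runs,
    # and only if the underlying interface provides set_next_index.
    if mode != "inference" or start_index is None:
        return
    if hasattr(interface, "set_next_index"):
        interface.set_next_index(name, start_index)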
256 changes: 0 additions & 256 deletions src/utils/core/larcvio/larcv_io.py

This file was deleted.

2 changes: 2 additions & 0 deletions src/utils/tensorflow2/distributed_trainer.py
@@ -47,11 +47,13 @@ def __init__(self, args):
        self._size = hvd.size()

    def init_optimizer(self):

        # with tf.variable_scope("hvd"):

        # # In the distributed case, we may want a learning rate behavior:
        # self._learning_rate = self.generate_learning_rate(self.args.learning_rate, self._global_step)
        tf_trainer.init_optimizer(self)
        if self.args.mode.name != ModeKind.train: return

        # Wrap the optimizer it in horovod:
        # self._opt = hvd.DistributedOptimizer(self._opt)
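
The init_optimizer override returns early when the run is not in training mode, so none of the Horovod-specific optimizer handling applies to inference or IO-test runs. A self-contained sketch of that guard pattern, using simplified stand-ins rather than the repository's classes:

from enum import Enum

class ModeKind(Enum):
    train = 0
    inference = 1

class Trainer:
    def __init__(self, mode):
        self.mode = mode
        self.opt = None

    def init_optimizer(self):
        if self.mode != ModeKind.train:
            return                    # nothing to build outside of training
        self.opt = "sgd"              # stand-in for a real optimizer

class DistributedTrainer(Trainer):
    def init_optimizer(self):
        super().init_optimizer()
        if self.mode != ModeKind.train:
            return                    # self.opt is still None; skip wrapping it
        self.opt = ("hvd", self.opt)  # stand-in for hvd.DistributedOptimizer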
(The diff for the sixth changed file did not load in this view.)

0 comments on commit ccad2eb