Skip to content

Commit

Permalink
Set save_summary_steps and save_checkpoints_steps to 100.
Browse files · Browse the repository at this point in the history
  • Branch information:
lizlooney committed Jun 26, 2021
1 parent f13895b commit 68b4add
Show file tree
Hide file tree
Showing 3 changed files with 9 additions and 4 deletions.
3 changes: 1 addition & 2 deletions research/object_detection/model_lib.py
Original file line number Diff line number Diff line change
Expand Up @@ -1023,8 +1023,7 @@ def terminate_eval():
return True

for ckpt in tf.train.checkpoints_iterator(
model_dir, min_interval_secs=180, timeout=None,
timeout_fn=terminate_eval):
model_dir, min_interval_secs=1, timeout=None, timeout_fn=None):

tf.logging.info('Starting Evaluation.')
try:
Expand Down
5 changes: 4 additions & 1 deletion research/object_detection/model_main.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,10 @@
def main(unused_argv):
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('pipeline_config_path')
config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir)
config = tf.estimator.RunConfig(model_dir=FLAGS.model_dir,
save_summary_steps=100,
save_checkpoints_steps=100,
save_checkpoints_secs=None)

train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config=config,
Expand Down
5 changes: 4 additions & 1 deletion research/object_detection/model_tpu_main.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,10 @@ def main(unused_argv):
model_dir=FLAGS.model_dir,
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_shards))
num_shards=FLAGS.num_shards),
save_summary_steps=100,
save_checkpoints_steps=100,
save_checkpoints_secs=None)

kwargs = {}
if FLAGS.train_batch_size:
Expand Down

0 comments on commit 68b4add

Please sign in to comment.