Skip to content

Commit

Permalink
Minimal Fix for Broken Tests (#130)
Browse files Browse the repository at this point in the history
* typo

* Minimal Fix for Tests
  • Loading branch information
LarsKue authored Feb 20, 2024
1 parent ccfd11a commit fe63666
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 37 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/docs.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# From https://github.com/eeholmes/readthedoc-test/blob/main/.github/workflows/docs_pages.yml
name: docs

# execute this workflow automatically when a we push to master
# execute this workflow automatically when we push to master
on:
push:
branches:
Expand Down
5 changes: 2 additions & 3 deletions tests/test_sensitivity.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,11 @@
from copy import deepcopy
from functools import partial
from unittest.mock import MagicMock, Mock

import numpy as np
import pytest
import tensorflow as tf

from copy import deepcopy

from bayesflow import computational_utilities, sensitivity, simulation
from tests.test_trainers import _create_training_setup, _prior, _simulator

Expand All @@ -26,7 +25,7 @@ def _trainer_amortizer_sample_mock(input_dict, n_samples):


class TestMisspecificationExperiment:
def setup(self):
def setup_method(self):
self.trainer = _create_training_setup(mode="posterior")

# Mock the approximate posterior sampling of the amortizer, return np.ones of shape (n_sim, n_samples)
Expand Down
35 changes: 2 additions & 33 deletions tests/test_trainers.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,9 @@ def _create_training_setup(mode):
trainer = Trainer(generative_model=model, amortizer=amortizer)
return trainer


class TestTrainer:
def setup(self):
def setup_method(self):
trainer_posterior = _create_training_setup("posterior")
trainer_likelihood = _create_training_setup("likelihood")
trainer_joint = _create_training_setup("joint")
Expand Down Expand Up @@ -111,7 +112,6 @@ def test_train_online(self, mode, reuse_optimizer, validation_sims):
assert type(h["train_losses"]) is DataFrame
assert type(h["val_losses"]) is DataFrame


@pytest.mark.parametrize("mode", ["posterior", "joint"])
@pytest.mark.parametrize("reuse_optimizer", [True, False])
@pytest.mark.parametrize("validation_sims", [20, None])
Expand Down Expand Up @@ -202,34 +202,3 @@ def test_train_rounds(self, mode, reuse_optimizer, validation_sims):
assert type(h) is dict
assert type(h["train_losses"]) is DataFrame
assert type(h["val_losses"]) is DataFrame

@pytest.mark.parametrize("reference_data", [None, "dict", "numpy"])
@pytest.mark.parametrize("observed_data_type", ["dict", "numpy"])
@pytest.mark.parametrize("bootstrap", [True, False])
def mmd_hypothesis_test_no_reference(self, reference_data, observed_data_type, bootstrap):
    """Smoke-test `Trainer.mmd_hypothesis_test` after a minimal online training run.

    Exercises the MMD hypothesis test with reference data supplied either not at
    all (None -> the trainer simulates its own reference), as a configurator
    dict, or as a raw numpy array of summary conditions; observed data is
    likewise passed as a dict or a numpy array, with and without bootstrapping.

    NOTE(review): the method name lacks the `test_` prefix, so pytest discovery
    never collects it — confirm whether that is intentional before relying on it.
    """
    trainer = self.trainers["posterior"]
    # One tiny epoch just so the amortizer has been through a training step.
    _ = trainer.train_online(epochs=1, iterations_per_epoch=1, batch_size=4)

    num_reference_simulations = 10
    num_observed_simulations = 2
    num_null_samples = 5

    # BUG FIX: the original guard read `if reference_data is None:`, which made
    # both inner branches (`== "dict"` / `== "numpy"`) unreachable — the
    # parametrized strings "dict"/"numpy" were passed straight through to
    # mmd_hypothesis_test. Convert the parametrized marker into actual data
    # only when one was requested; None is forwarded as-is.
    if reference_data is not None:
        if reference_data == "dict":
            reference_data = trainer.configurator(trainer.generative_model(num_reference_simulations))
        elif reference_data == "numpy":
            reference_data = trainer.configurator(trainer.generative_model(num_reference_simulations))["summary_conditions"]

    if observed_data_type == "dict":
        observed_data = trainer.configurator(trainer.generative_model(num_observed_simulations))
    elif observed_data_type == "numpy":
        observed_data = trainer.configurator(trainer.generative_model(num_observed_simulations))["summary_conditions"]

    MMD_sampling_distribution, MMD_observed = trainer.mmd_hypothesis_test(
        observed_data=observed_data,
        reference_data=reference_data,
        num_reference_simulations=num_reference_simulations,
        num_null_samples=num_null_samples,
        bootstrap=bootstrap,
    )

    # The null distribution has one MMD value per reference simulation, and
    # MMD is a non-negative statistic.
    assert MMD_sampling_distribution.shape[0] == num_reference_simulations
    assert np.all(MMD_sampling_distribution > 0)

0 comments on commit fe63666

Please sign in to comment.