diff --git a/README.md b/README.md
index 5e71484..714b626 100644
--- a/README.md
+++ b/README.md
@@ -41,21 +41,6 @@ $ python main.py
 
 ### Sample Output:
 ```
-====== End Balance Table 500 Runs ======
-     start_balance  contribution  end_balance  average_return
-0             1000        250000     530956.0        4.860000
-1             1000        250000    5340032.0       12.386531
-2             1000        250000    4630970.0        9.062449
-3             1000        250000     998079.0        4.510000
-4             1000        250000     826731.0        7.439388
-..             ...           ...          ...             ...
-495           1000        250000    1861456.0        9.094082
-496           1000        250000    5137429.0       12.177347
-497           1000        250000     748437.0        6.220408
-498           1000        250000    1839859.0        7.693673
-499           1000        250000    2431553.0        7.890612
-
-[500 rows x 4 columns]
 ====== End Balance Summary Table 500 Runs ======
        start_balance  contribution  end_balance  average_return
 count          500.0         500.0        500.0           500.0
@@ -157,5 +142,4 @@ The `test_summary.py` program reads every summary statistics csv file and verifi
 
 After each run of the experiment, the success rate is appended to the `success_rate.csv` file. The file is created automatically if it does not exist. Running `main.py` 100 times will generate a single csv file containing 100 lines.
 
-The `test_success_rate.py` program will read the `success_rate.csv` file and verify the success rate's minimum and maximum values to be within an expected range.
-I choose to verify the min/smallest and max/largest values rather than the mean so the test more sensitive to changes in the experiment.
+The `test_success_rate.py` program will read the `success_rate.csv` file and verify the success rate's mean and standard deviation are within an expected range.
diff --git a/main.py b/main.py
index 50fa4ee..42826b5 100644
--- a/main.py
+++ b/main.py
@@ -72,8 +72,6 @@
 df_all_stats.round(4)
 
 # output End Balance summary table
-print("=" * 6 + " End Balance Table %s Runs " %(num_reps) + "=" * 6)
-print(df_all_stats)
 print("=" * 6 + " End Balance Summary Table %s Runs " %(num_reps) + "=" * 6)
 print(df_all_stats.describe().round(0))
 print("=" * 60)
diff --git a/test_success_rate.py b/test_success_rate.py
index 4a86f38..24eb433 100644
--- a/test_success_rate.py
+++ b/test_success_rate.py
@@ -10,11 +10,11 @@
 df_summary = df.describe()
 #print(df_summary)
 
-success_rate_min = df_summary.at['min', 2]
-success_rate_max = df_summary.at['max', 2]
+success_rate_mean = df_summary.at['mean', 2]
+success_rate_std = df_summary.at['std', 2]
 
-assert success_rate_min > .62, f"success rate min is too small, got {success_rate_min}"
-assert success_rate_min < .65, f"success rate min is too large, got {success_rate_min}"
+assert success_rate_mean > .66, f"success rate mean is too small, got {success_rate_mean}"
+assert success_rate_mean < .70, f"success rate mean is too large, got {success_rate_mean}"
 
-assert success_rate_max > .719, f"success rate max is too small, got {success_rate_max}"
-assert success_rate_max < .731, f"success rate max is too large, got {success_rate_max}"
+assert success_rate_std > .020, f"success rate std is too small, got {success_rate_std}"
+assert success_rate_std < .024, f"success rate std is too large, got {success_rate_std}"
diff --git a/test_summary.py b/test_summary.py
index 24dbb7c..0fab792 100644
--- a/test_summary.py
+++ b/test_summary.py
@@ -20,7 +20,7 @@
     assert end_balance_mean > 2100000, f"end balance mean is too small, got {end_balance_mean}"
     assert end_balance_mean < 3150000, f"end balance mean is too large, got {end_balance_mean}"
 
-    assert end_balance_std > 2160000, f"end balance std is too small, got {end_balance_std}"
+    assert end_balance_std > 2100000, f"end balance std is too small, got {end_balance_std}"
     assert end_balance_std < 8130000, f"end balance std is too large, got {end_balance_std}"
 
     assert average_return_mean > 7.4, f"average return mean is too small, got {average_return_mean}"
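
For context on the new assertion style, here is a minimal, self-contained sketch of the `describe()`/`.at[]` pattern that `test_success_rate.py` now relies on. The inline DataFrame is fake stand-in data for illustration only; it assumes `success_rate.csv` is read without a header row, which is why the columns carry integer labels and the success rate is addressed as column `2`.

```python
# Sketch of the mean/std checks introduced above (stand-in data, not the real csv).
import pandas as pd

df = pd.DataFrame({0: [1000, 1000, 1000],        # start balance column
                   1: [250000, 250000, 250000],  # contribution column
                   2: [0.65, 0.68, 0.69]})       # success rate column

# describe() returns a DataFrame indexed by count, mean, std, min, 25%, 50%, 75%, max
df_summary = df.describe()

# .at[row_label, column_label] pulls a single cell from the summary table
success_rate_mean = df_summary.at['mean', 2]  # 0.6733...
success_rate_std = df_summary.at['std', 2]    # 0.0208...

assert success_rate_mean > .66, f"success rate mean is too small, got {success_rate_mean}"
assert success_rate_mean < .70, f"success rate mean is too large, got {success_rate_mean}"
```

Note that `describe()` reports the sample standard deviation (ddof=1), so that is the quantity the new `.020`–`.024` bounds are calibrated against.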