diff --git a/docs/notebooks/demo_UncertaintiesAndRandomization.ipynb b/docs/notebooks/demo_UncertaintiesAndRandomization.ipynb index bc7bee6c..caffab75 100644 --- a/docs/notebooks/demo_UncertaintiesAndRandomization.ipynb +++ b/docs/notebooks/demo_UncertaintiesAndRandomization.ipynb @@ -27,7 +27,8 @@ "import numpy as np\n", "import pandas as pd\n", "import matplotlib.pyplot as plt\n", - "from astropy.coordinates import SkyCoord" + "from astropy.coordinates import SkyCoord\n", + "from sorcha.utilities.sorchaConfigs import expertConfigs" ] }, { @@ -99,6 +100,8 @@ "outputs": [], "source": [ "configs = {'trailing_losses_on':True, 'default_SNR_cut': False}\n", + "configs = expertConfigs(**configs)\n", + "setattr(configs, \"expert\", configs)\n", "rng = PerModuleRNG(2012)" ] }, @@ -317,7 +320,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "sorcha", "language": "python", "name": "python3" }, @@ -331,7 +334,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.10.15" } }, "nbformat": 4, diff --git a/src/sorcha/ephemeris/simulation_driver.py b/src/sorcha/ephemeris/simulation_driver.py index 1bea22c8..7521c9c9 100644 --- a/src/sorcha/ephemeris/simulation_driver.py +++ b/src/sorcha/ephemeris/simulation_driver.py @@ -47,7 +47,7 @@ def get_vec(row, vecname): return np.asarray([row[f"{vecname}_x"], row[f"{vecname}_y"], row[f"{vecname}_z"]]) -def create_ephemeris(orbits_df, pointings_df, args, configs): +def create_ephemeris(orbits_df, pointings_df, args, sconfigs): """Generate a set of observations given a collection of orbits and set of pointings. @@ -59,7 +59,8 @@ def create_ephemeris(orbits_df, pointings_df, args, configs): The dataframe containing the collection of telescope/camera pointings. args : Various arguments necessary for the calculation - configs : dictionary + sconfigs: + Dataclass of configuration file arguments. Various configuration parameters necessary for the calculation ang_fov : float The angular size (deg) of the field of view @@ -103,11 +104,11 @@ def create_ephemeris(orbits_df, pointings_df, args, configs): """ verboselog = args.pplogger.info if args.verbose else lambda *a, **k: None - ang_fov = configs["ar_ang_fov"] - buffer = configs["ar_fov_buffer"] - picket_interval = configs["ar_picket"] - obsCode = configs["ar_obs_code"] - nside = 2 ** configs["ar_healpix_order"] + ang_fov = sconfigs.simulation.ar_ang_fov + buffer = sconfigs.simulation.ar_fov_buffer + picket_interval = sconfigs.simulation.ar_picket + obsCode = sconfigs.simulation.ar_obs_code + nside = 2**sconfigs.simulation.ar_healpix_order n_sub_intervals = 101 # configs["n_sub_intervals"] ephemeris_csv_filename = None @@ -221,7 +222,7 @@ def create_ephemeris(orbits_df, pointings_df, args, configs): # if the user has defined an output file name for the ephemeris results, write out to that file if ephemeris_csv_filename: verboselog("Writing out ephemeris results to file.") - write_out_ephemeris_file(ephemeris_df, ephemeris_csv_filename, args, configs) + write_out_ephemeris_file(ephemeris_df, ephemeris_csv_filename, args, sconfigs) # join the ephemeris and input orbits dataframe, take special care to make # sure the 'ObjID' column types match. 
@@ -328,7 +329,7 @@ def calculate_rates_and_geometry(pointing: pd.DataFrame, ephem_geom_params: Ephe ) -def write_out_ephemeris_file(ephemeris_df, ephemeris_csv_filename, args, configs): +def write_out_ephemeris_file(ephemeris_df, ephemeris_csv_filename, args, sconfigs): """Writes the ephemeris out to an external file. Parameters @@ -342,8 +343,8 @@ def write_out_ephemeris_file(ephemeris_df, ephemeris_csv_filename, args, configs args: sorchaArguments object or similar Command-line arguments from Sorcha. - configs: dict - Dictionary of configuration file arguments. + sconfigs: dataclass + Dataclass of configuration file arguments. Returns ------- @@ -352,12 +353,12 @@ def write_out_ephemeris_file(ephemeris_df, ephemeris_csv_filename, args, configs verboselog = args.pplogger.info if args.verbose else lambda *a, **k: None - if configs["eph_format"] == "csv": + if sconfigs.input.eph_format == "csv": verboselog("Outputting ephemeris to CSV file...") PPOutWriteCSV(ephemeris_df, ephemeris_csv_filename + ".csv") - elif configs["eph_format"] == "whitespace": + elif sconfigs.input.eph_format == "whitespace": verboselog("Outputting ephemeris to whitespaced CSV file...") PPOutWriteCSV(ephemeris_df, ephemeris_csv_filename + ".csv", separator=" ") - elif configs["eph_format"] == "hdf5" or configs["output_format"] == "h5": + elif sconfigs.input.eph_format == "hdf5" or sconfigs.output.output_format == "h5": verboselog("Outputting ephemeris to HDF5 binary file...") PPOutWriteHDF5(ephemeris_df, ephemeris_csv_filename + ".h5", "sorcha_ephemeris") diff --git a/src/sorcha/ephemeris/simulation_setup.py b/src/sorcha/ephemeris/simulation_setup.py index d3ff62c7..3a21782c 100644 --- a/src/sorcha/ephemeris/simulation_setup.py +++ b/src/sorcha/ephemeris/simulation_setup.py @@ -163,7 +163,7 @@ def generate_simulations(ephem, gm_sun, gm_total, orbits_df, args): return sim_dict -def precompute_pointing_information(pointings_df, args, configs): +def precompute_pointing_information(pointings_df, args, sconfigs): """This function is meant to be run once to prime the pointings dataframe with additional information that Assist & Rebound needs for it's work. @@ -173,8 +173,8 @@ def precompute_pointing_information(pointings_df, args, configs): Contains the telescope pointing database. args : dictionary Command line arguments needed for initialization. - configs : dictionary - Configuration settings. + sconfigs: dataclass + Dataclass of configuration file arguments. 
Returns -------- @@ -184,7 +184,7 @@ def precompute_pointing_information(pointings_df, args, configs): ephem, _, _ = create_assist_ephemeris(args) furnish_spiceypy(args) - obsCode = configs["ar_obs_code"] + obsCode = sconfigs.simulation.ar_obs_code observatories = Observatory(args) # vectorize the calculation to get x,y,z vector from ra/dec @@ -204,8 +204,8 @@ def precompute_pointing_information(pointings_df, args, configs): # create a partial function since most params don't change, and it makes the lambda easier to read partial_get_hp_neighbors = partial( get_hp_neighbors, - search_radius=configs["ar_ang_fov"] + configs["ar_fov_buffer"], - nside=2 ** configs["ar_healpix_order"], + search_radius=sconfigs.simulation.ar_ang_fov + sconfigs.simulation.ar_fov_buffer, + nside=2**sconfigs.simulation.ar_healpix_order, nested=True, ) diff --git a/src/sorcha/modules/PPAddUncertainties.py b/src/sorcha/modules/PPAddUncertainties.py index 0004fb12..04ac7217 100755 --- a/src/sorcha/modules/PPAddUncertainties.py +++ b/src/sorcha/modules/PPAddUncertainties.py @@ -61,7 +61,7 @@ def degSin(x): return np.sin(x * np.pi / 180.0) -def addUncertainties(detDF, configs, module_rngs, verbose=True): +def addUncertainties(detDF, sconfigs, module_rngs, verbose=True): """ Generates astrometric and photometric uncertainties, and SNR. Uses uncertainties to randomize the photometry. Accounts for trailing losses. @@ -80,8 +80,8 @@ def addUncertainties(detDF, configs, module_rngs, verbose=True): detDF : Pandas dataframe) Dataframe of observations. - configs : dictionary - dictionary of configurations from config file. + sconfigs: dataclass + Dataclass of configuration file arguments. module_rngs : PerModuleRNG A collection of random number generators (per module). @@ -100,11 +100,11 @@ def addUncertainties(detDF, configs, module_rngs, verbose=True): verboselog = pplogger.info if verbose else lambda *a, **k: None detDF["astrometricSigma_deg"], detDF["trailedSourceMagSigma"], detDF["SNR"] = uncertainties( - detDF, configs, filterMagName="trailedSourceMagTrue" + detDF, sconfigs, filterMagName="trailedSourceMagTrue" ) - if configs.get("trailing_losses_on", False): - _, detDF["PSFMagSigma"], detDF["SNR"] = uncertainties(detDF, configs, filterMagName="PSFMagTrue") + if sconfigs.expert.trailing_losses_on: + _, detDF["PSFMagSigma"], detDF["SNR"] = uncertainties(detDF, sconfigs, filterMagName="PSFMagTrue") else: detDF["PSFMagSigma"] = detDF["trailedSourceMagSigma"] @@ -113,7 +113,7 @@ def addUncertainties(detDF, configs, module_rngs, verbose=True): def uncertainties( detDF, - configs, + sconfigs, limMagName="fiveSigmaDepth_mag", seeingName="seeingFwhmGeom_arcsec", filterMagName="trailedSourceMagTrue", @@ -130,8 +130,8 @@ def uncertainties( detDF : Pandas dataframe dataframe containing observations. - configs : dictionary - dictionary of configurations from config file. + sconfigs: dataclass + Dataclass of configuration file arguments. limMagName : string, optional pandas dataframe column name of the limiting magnitude. @@ -173,7 +173,7 @@ def uncertainties( signal-to-noise ratio. 
""" - if configs.get("trailing_losses_on", False): + if sconfigs.expert.trailing_losses_on: dMag = PPTrailingLoss.calcTrailingLoss( detDF[dra_name], detDF[ddec_name], diff --git a/src/sorcha/modules/PPApplyFOVFilter.py b/src/sorcha/modules/PPApplyFOVFilter.py index 53c3bf7b..3d501315 100755 --- a/src/sorcha/modules/PPApplyFOVFilter.py +++ b/src/sorcha/modules/PPApplyFOVFilter.py @@ -5,7 +5,7 @@ from sorcha.modules.PPModuleRNG import PerModuleRNG -def PPApplyFOVFilter(observations, configs, module_rngs, footprint=None, verbose=False): +def PPApplyFOVFilter(observations, sconfigs, module_rngs, footprint=None, verbose=False): """ Wrapper function for PPFootprintFilter and PPFilterDetectionEfficiency that checks to see whether a camera footprint filter should be applied or if a simple fraction of the @@ -22,8 +22,8 @@ def PPApplyFOVFilter(observations, configs, module_rngs, footprint=None, verbose observations: Pandas dataframe dataframe of observations. - configs : dictionary - dictionary of variables from config file. + sconfigs: dataclass + Dataclass of configuration file arguments. module_rngs : PerModuleRNG A collection of random number generators (per module). @@ -45,10 +45,10 @@ def PPApplyFOVFilter(observations, configs, module_rngs, footprint=None, verbose pplogger = logging.getLogger(__name__) verboselog = pplogger.info if verbose else lambda *a, **k: None - if configs["camera_model"] == "footprint": + if sconfigs.fov.camera_model == "footprint": verboselog("Applying sensor footprint filter...") onSensor, detectorIDs = footprint.applyFootprint( - observations, edge_thresh=configs["footprint_edge_threshold"] + observations, edge_thresh=sconfigs.fov.footprint_edge_threshold ) observations = observations.iloc[onSensor].copy() @@ -56,14 +56,14 @@ def PPApplyFOVFilter(observations, configs, module_rngs, footprint=None, verbose observations = observations.sort_index() - if configs["camera_model"] == "circle": + if sconfigs.fov.camera_model == "circle": verboselog("FOV is circular...") - if configs["circle_radius"]: + if sconfigs.fov.circle_radius: verboselog("Circle radius is set. Applying circular footprint filter...") - observations = PPCircleFootprint(observations, configs["circle_radius"]) - if configs["fill_factor"]: + observations = PPCircleFootprint(observations, sconfigs.fov.circle_radius) + if sconfigs.fov.fill_factor: verboselog("Fill factor is set. Removing random observations to mimic chip gaps.") - observations = PPSimpleSensorArea(observations, module_rngs, configs["fill_factor"]) + observations = PPSimpleSensorArea(observations, module_rngs, sconfigs.fov.fill_factor) return observations diff --git a/src/sorcha/modules/PPOutput.py b/src/sorcha/modules/PPOutput.py index 269394e9..b8e06f64 100755 --- a/src/sorcha/modules/PPOutput.py +++ b/src/sorcha/modules/PPOutput.py @@ -132,7 +132,7 @@ def PPIndexSQLDatabase(outf, tablename="sorcha_results"): cnx.commit() -def PPWriteOutput(cmd_args, configs, observations_in, verbose=False): +def PPWriteOutput(cmd_args, sconfigs, observations_in, verbose=False): """ Writes the output in the format specified in the config file to a location specified by the user. @@ -142,8 +142,8 @@ def PPWriteOutput(cmd_args, configs, observations_in, verbose=False): cmd_args : dictionary Dictonary of command line arguments. - configs : Dictionary - Dictionary of config file arguments. + sconfigs: dataclass + Dataclass of configuration file arguments. observations_in : Pandas dataframe Dataframe of output. 
@@ -171,7 +171,7 @@ def PPWriteOutput(cmd_args, configs, observations_in, verbose=False): + observations_in["Obj_Sun_z_LTC_km"].values ** 2 ) - if configs["output_columns"] == "basic": + if sconfigs.output.output_columns == "basic": observations = observations_in.copy()[ [ "ObjID", @@ -193,14 +193,14 @@ def PPWriteOutput(cmd_args, configs, observations_in, verbose=False): ] # if linking is on and unlinked objects are NOT dropped, add the object_linked column to the output - if configs["SSP_linking_on"] and not configs["drop_unlinked"]: + if sconfigs.linkingfilter.ssp_linking_on and not sconfigs.linkingfilter.drop_unlinked: observations["object_linked"] = observations_in["object_linked"].copy() - elif configs["output_columns"] == "all": + elif sconfigs.output.output_columns == "all": observations = observations_in.copy() - elif len(configs["output_columns"]) > 1: # assume a list of column names... + elif len(sconfigs.output.output_columns) > 1: # assume a list of column names... try: - observations = observations_in.copy()[configs["output_columns"]] + observations = observations_in.copy()[sconfigs.output.output_columns] except KeyError: pplogger.error( "ERROR: at least one of the columns provided in output_columns does not seem to exist. Check docs and try again." @@ -209,7 +209,7 @@ def PPWriteOutput(cmd_args, configs, observations_in, verbose=False): "ERROR: at least one of the columns provided in output_columns does not seem to exist. Check docs and try again." ) - if configs["position_decimals"]: + if sconfigs.output.position_decimals: for position_col in [ "fieldRA_deg", "fieldDec_deg", @@ -221,12 +221,12 @@ def PPWriteOutput(cmd_args, configs, observations_in, verbose=False): ]: try: # depending on type of output selected, some of these columns may not exist. observations[position_col] = observations[position_col].round( - decimals=configs["position_decimals"] + decimals=sconfigs.output.position_decimals ) except KeyError: continue - if configs["magnitude_decimals"]: + if sconfigs.output.magnitude_decimals: for magnitude_col in [ "PSFMag", "trailedSourceMag", @@ -239,26 +239,26 @@ def PPWriteOutput(cmd_args, configs, observations_in, verbose=False): ]: try: # depending on type of output selected, some of these columns may not exist. 
observations[magnitude_col] = observations[magnitude_col].round( - decimals=configs["magnitude_decimals"] + decimals=sconfigs.output.magnitude_decimals ) except KeyError: continue verboselog("Constructing output path...") - if configs["output_format"] == "csv": + if sconfigs.output.output_format == "csv": outputsuffix = ".csv" out = os.path.join(cmd_args.outpath, cmd_args.outfilestem + outputsuffix) verboselog("Output to CSV file...") observations = PPOutWriteCSV(observations, out) - elif configs["output_format"] == "sqlite3": + elif sconfigs.output.output_format == "sqlite3": outputsuffix = ".db" out = os.path.join(cmd_args.outpath, cmd_args.outfilestem + outputsuffix) verboselog("Output to sqlite3 database...") observations = PPOutWriteSqlite3(observations, out) - elif configs["output_format"] == "hdf5" or configs["output_format"] == "h5": + elif sconfigs.output.output_format == "hdf5" or sconfigs.output.output_format == "h5": outputsuffix = ".h5" out = os.path.join(cmd_args.outpath, cmd_args.outfilestem + outputsuffix) verboselog("Output to HDF5 binary file...") diff --git a/src/sorcha/modules/PPRandomizeMeasurements.py b/src/sorcha/modules/PPRandomizeMeasurements.py index c0bed606..b09ddd37 100644 --- a/src/sorcha/modules/PPRandomizeMeasurements.py +++ b/src/sorcha/modules/PPRandomizeMeasurements.py @@ -31,7 +31,7 @@ logger = logging.getLogger(__name__) -def randomizeAstrometryAndPhotometry(observations, configs, module_rngs, verbose=False): +def randomizeAstrometryAndPhotometry(observations, sconfigs, module_rngs, verbose=False): """ Wrapper function to perform randomisation of astrometry and photometry around their uncertainties. Calls randomizePhotometry() and randomizeAstrometry(). @@ -47,8 +47,8 @@ def randomizeAstrometryAndPhotometry(observations, configs, module_rngs, verbose observations : pandas dataframe Dataframe containing observations. - configs : dict - Dictionary of config file variables. + sconfigs: dataclass + Dataclass of configuration file arguments. module_rngs : PerModuleRNG A collection of random number generators (per module). @@ -70,7 +70,7 @@ def randomizeAstrometryAndPhotometry(observations, configs, module_rngs, verbose # default SNR cut can be disabled in the config file under EXPERT # at low SNR, high photometric sigma causes randomisation to sometimes # grossly inflate/decrease magnitudes. 
- if configs.get("default_SNR_cut", False): + if sconfigs.expert.default_SNR_cut: verboselog("Removing all observations with SNR < 2.0...") observations = PPSNRLimit(observations.copy(), 2.0) @@ -79,7 +79,7 @@ def randomizeAstrometryAndPhotometry(observations, configs, module_rngs, verbose observations, module_rngs, magName="trailedSourceMagTrue", sigName="trailedSourceMagSigma" ) - if configs.get("trailing_losses_on", False): + if sconfigs.expert.trailing_losses_on: observations["PSFMag"] = randomizePhotometry( observations, module_rngs, magName="PSFMagTrue", sigName="PSFMagSigma" ) diff --git a/src/sorcha/modules/PPStats.py b/src/sorcha/modules/PPStats.py index 324108dd..00183945 100644 --- a/src/sorcha/modules/PPStats.py +++ b/src/sorcha/modules/PPStats.py @@ -3,7 +3,7 @@ import os -def stats(observations, statsfilename, outpath, configs): +def stats(observations, statsfilename, outpath, sconfigs): """ Write a summary statistics file including whether each object was linked or not within miniDifi, their number of observations, min/max phase angles, @@ -18,6 +18,9 @@ def stats(observations, statsfilename, outpath, configs): statsfilename : string Stem filename to write summary stats file to + sconfigs: dataclass + Dataclass of configuration file arguments. + Returns ------- None. @@ -40,11 +43,11 @@ def stats(observations, statsfilename, outpath, configs): ) num_obs = group_by.agg("size").to_frame("number_obs") - if configs["SSP_linking_on"] and not configs["drop_unlinked"]: + if sconfigs.linkingfilter.ssp_linking_on and not sconfigs.linkingfilter.drop_unlinked: linked = group_by["object_linked"].agg("all").to_frame("object_linked") date_linked = group_by["date_linked_MJD"].agg("first").to_frame("date_linked_MJD") joined_stats = num_obs.join([mag, phase_deg, linked, date_linked]) - elif configs["SSP_linking_on"]: + elif sconfigs.linkingfilter.ssp_linking_on: date_linked = group_by["date_linked_MJD"].agg("first").to_frame("date_linked_MJD") joined_stats = num_obs.join([mag, phase_deg, date_linked]) else: diff --git a/src/sorcha/sorcha.py b/src/sorcha/sorcha.py index 14bf4726..0c1087af 100755 --- a/src/sorcha/sorcha.py +++ b/src/sorcha/sorcha.py @@ -40,6 +40,7 @@ from sorcha.lightcurves.lightcurve_registration import update_lc_subclasses from sorcha.utilities.sorchaArguments import sorchaArguments +from sorcha.utilities.sorchaConfigs import sorchaConfigs, PrintConfigsToLog from sorcha.utilities.citation_text import cite_sorcha @@ -77,7 +78,7 @@ def mem(df): return usage -def runLSSTSimulation(args, configs): +def runLSSTSimulation(args, sconfigs): """ Runs the post processing survey simulator functions that apply a series of filters to bias a model Solar System small body population to what the @@ -91,6 +92,8 @@ def runLSSTSimulation(args, configs): pplogger : logging.Logger, optional The logger to use in this function. If None creates a new one. Default = None + sconfigs: dataclass + Dataclass of configuration file arguments. 
Returns ----------- @@ -110,29 +113,32 @@ def runLSSTSimulation(args, configs): # if not, verboselog does absolutely nothing verboselog = pplogger.info if args.verbose else lambda *a, **k: None - configs["mainfilter"], configs["othercolours"] = PPGetMainFilterAndColourOffsets( - args.paramsinput, configs["observing_filters"], configs["aux_format"] + sconfigs.filters.mainfilter, sconfigs.filters.othercolours = PPGetMainFilterAndColourOffsets( + args.paramsinput, sconfigs.filters.observing_filters, sconfigs.input.aux_format ) - PPPrintConfigsToLog(configs, args) + PrintConfigsToLog(sconfigs, args) # End of config parsing verboselog("Reading pointing database...") filterpointing = PPReadPointingDatabase( - args.pointing_database, configs["observing_filters"], configs["pointing_sql_query"], args.surveyname + args.pointing_database, + sconfigs.filters.observing_filters, + sconfigs.input.pointing_sql_query, + args.surveyname, ) # if we are going to compute the ephemerides, then we should pre-compute all # of the needed values derived from the pointing information. - if configs["ephemerides_type"].casefold() != "external": + if sconfigs.input.ephemerides_type.casefold() != "external": verboselog("Pre-computing pointing information for ephemeris generation") - filterpointing = precompute_pointing_information(filterpointing, args, configs) + filterpointing = precompute_pointing_information(filterpointing, args, sconfigs) # Set up the data readers. - ephem_type = configs["ephemerides_type"] + ephem_type = sconfigs.input.ephemerides_type ephem_primary = False reader = CombinedDataReader(ephem_primary=ephem_primary, verbose=True) @@ -144,12 +150,12 @@ def runLSSTSimulation(args, configs): pplogger.error(f"PPReadAllInput: Unsupported value for ephemerides_type {ephem_type}") sys.exit(f"PPReadAllInput: Unsupported value for ephemerides_type {ephem_type}") if ephem_type.casefold() == "external": - reader.add_ephem_reader(EphemerisDataReader(args.input_ephemeris_file, configs["eph_format"])) + reader.add_ephem_reader(EphemerisDataReader(args.input_ephemeris_file, sconfigs.input.eph_format)) - reader.add_aux_data_reader(OrbitAuxReader(args.orbinfile, configs["aux_format"])) - reader.add_aux_data_reader(CSVDataReader(args.paramsinput, configs["aux_format"])) - if configs["comet_activity"] is not None or configs["lc_model"] is not None: - reader.add_aux_data_reader(CSVDataReader(args.complex_parameters, configs["aux_format"])) + reader.add_aux_data_reader(OrbitAuxReader(args.orbinfile, sconfigs.input.aux_format)) + reader.add_aux_data_reader(CSVDataReader(args.paramsinput, sconfigs.input.aux_format)) + if sconfigs.activity.comet_activity is not None or sconfigs.lightcurve.lc_model is not None: + reader.add_aux_data_reader(CSVDataReader(args.complex_parameters, sconfigs.input.aux_format)) # Check to make sure the ObjIDs in all of the aux_data_readers are a match. 
reader.check_aux_object_ids() @@ -164,24 +170,24 @@ def runLSSTSimulation(args, configs): lenf = len(reader.aux_data_readers[0].obj_id_table) footprint = None - if configs["camera_model"] == "footprint": + if sconfigs.fov.camera_model == "footprint": verboselog("Creating sensor footprint object for filtering") - footprint = Footprint(configs["footprint_path"]) + footprint = Footprint(sconfigs.fov.footprint_path) while endChunk < lenf: verboselog("Starting main Sorcha processing loop round {}".format(loopCounter)) - endChunk = startChunk + configs["size_serial_chunk"] + endChunk = startChunk + sconfigs.input.size_serial_chunk verboselog("Working on objects {}-{}".format(startChunk, endChunk)) # Processing begins, all processing is done for chunks - if configs["ephemerides_type"].casefold() == "external": + if sconfigs.input.ephemerides_type.casefold() == "external": verboselog("Reading in chunk of orbits and associated ephemeris from an external file") - observations = reader.read_block(block_size=configs["size_serial_chunk"]) + observations = reader.read_block(block_size=sconfigs.input.size_serial_chunk) else: verboselog("Ingest chunk of orbits") - orbits_df = reader.read_aux_block(block_size=configs["size_serial_chunk"]) + orbits_df = reader.read_aux_block(block_size=sconfigs.input.size_serial_chunk) verboselog("Starting ephemeris generation") - observations = create_ephemeris(orbits_df, filterpointing, args, configs) + observations = create_ephemeris(orbits_df, filterpointing, args, sconfigs) verboselog("Ephemeris generation completed") verboselog("Start post processing for this chunk") @@ -193,7 +199,7 @@ def runLSSTSimulation(args, configs): pplogger.info( "WARNING: no ephemeris observations found for these objects. Skipping to next chunk..." ) - startChunk = startChunk + configs["size_serial_chunk"] + startChunk = startChunk + sconfigs.input.size_serial_chunk loopCounter = loopCounter + 1 continue @@ -202,23 +208,23 @@ def runLSSTSimulation(args, configs): verboselog("Calculating apparent magnitudes...") observations = PPCalculateApparentMagnitude( observations, - configs["phase_function"], - configs["mainfilter"], - configs["othercolours"], - configs["observing_filters"], - configs["comet_activity"], - lightcurve_choice=configs["lc_model"], + sconfigs.phasecurves.phase_function, + sconfigs.filters.mainfilter, + sconfigs.filters.othercolours, + sconfigs.filters.observing_filters, + sconfigs.activity.comet_activity, + lightcurve_choice=sconfigs.lightcurve.lc_model, verbose=args.verbose, ) - if configs["trailing_losses_on"]: + if sconfigs.expert.trailing_losses_on: verboselog("Calculating trailing losses...") dmagDetect = PPTrailingLoss(observations, "circularPSF") observations["PSFMagTrue"] = dmagDetect + observations["trailedSourceMagTrue"] else: observations["PSFMagTrue"] = observations["trailedSourceMagTrue"] - if configs["vignetting_on"]: + if sconfigs.expert.vignetting_on: verboselog("Calculating effects of vignetting on limiting magnitude...") observations["fiveSigmaDepth_mag"] = PPVignetting.vignettingEffects(observations) else: @@ -233,15 +239,15 @@ def runLSSTSimulation(args, configs): # Do NOT use trailedSourceMagTrue or PSFMagTrue, these are the unrandomised magnitudes. 
verboselog("Calculating astrometric and photometric uncertainties...") observations = PPAddUncertainties.addUncertainties( - observations, configs, args._rngs, verbose=args.verbose + observations, sconfigs, args._rngs, verbose=args.verbose ) - if configs["randomization_on"]: + if sconfigs.expert.randomization_on: verboselog( "Number of rows BEFORE randomizing astrometry and photometry: " + str(len(observations.index)) ) observations = PPRandomizeMeasurements.randomizeAstrometryAndPhotometry( - observations, configs, args._rngs, verbose=args.verbose + observations, sconfigs, args._rngs, verbose=args.verbose ) verboselog( "Number of rows AFTER randomizing astrometry and photometry: " + str(len(observations.index)) @@ -261,61 +267,63 @@ def runLSSTSimulation(args, configs): observations["trailedSourceMag"] = observations["trailedSourceMagTrue"].copy() observations["PSFMag"] = observations["PSFMagTrue"].copy() - if configs["camera_model"] != "none" and len(observations.index) > 0: + if sconfigs.fov.camera_model != "none" and len(observations.index) > 0: verboselog("Applying field-of-view filters...") verboselog("Number of rows BEFORE applying FOV filters: " + str(len(observations.index))) observations = PPApplyFOVFilter( - observations, configs, args._rngs, footprint=footprint, verbose=args.verbose + observations, sconfigs, args._rngs, footprint=footprint, verbose=args.verbose ) verboselog("Number of rows AFTER applying FOV filters: " + str(len(observations.index))) - if configs["SNR_limit_on"] and len(observations.index) > 0: + if sconfigs.expert.SNR_limit_on and len(observations.index) > 0: verboselog( "Dropping observations with signal to noise ratio less than {}...".format( - configs["SNR_limit"] + sconfigs.expert.SNR_limit ) ) verboselog("Number of rows BEFORE applying SNR limit filter: " + str(len(observations.index))) - observations = PPSNRLimit(observations, configs["SNR_limit"]) + observations = PPSNRLimit(observations, sconfigs.expert.SNR_limit) verboselog("Number of rows AFTER applying SNR limit filter: " + str(len(observations.index))) - if configs["mag_limit_on"] and len(observations.index) > 0: + if sconfigs.expert.mag_limit_on and len(observations.index) > 0: verboselog("Dropping detections fainter than user-defined magnitude limit... 
") verboselog("Number of rows BEFORE applying mag limit filter: " + str(len(observations.index))) - observations = PPMagnitudeLimit(observations, configs["mag_limit"]) + observations = PPMagnitudeLimit(observations, sconfigs.expert.mag_limit) verboselog("Number of rows AFTER applying mag limit filter: " + str(len(observations.index))) - if configs["fading_function_on"] and len(observations.index) > 0: + if sconfigs.fadingfunction.fading_function_on and len(observations.index) > 0: verboselog("Applying detection efficiency fading function...") verboselog("Number of rows BEFORE applying fading function: " + str(len(observations.index))) observations = PPFadingFunctionFilter( observations, - configs["fading_function_peak_efficiency"], - configs["fading_function_width"], + sconfigs.fadingfunction.fading_function_peak_efficiency, + sconfigs.fadingfunction.fading_function_width, args._rngs, verbose=args.verbose, ) verboselog("Number of rows AFTER applying fading function: " + str(len(observations.index))) - if configs["bright_limit_on"] and len(observations.index) > 0: + if sconfigs.saturation.bright_limit_on and len(observations.index) > 0: verboselog("Dropping observations that are too bright...") verboselog("Number of rows BEFORE applying bright limit filter " + str(len(observations.index))) - observations = PPBrightLimit(observations, configs["observing_filters"], configs["bright_limit"]) + observations = PPBrightLimit( + observations, sconfigs.filters.observing_filters, sconfigs.saturation.bright_limit + ) verboselog("Number of rows AFTER applying bright limit filter " + str(len(observations.index))) - if configs["SSP_linking_on"] and len(observations.index) > 0: + if sconfigs.linkingfilter.ssp_linking_on and len(observations.index) > 0: verboselog("Applying SSP linking filter...") verboselog("Number of rows BEFORE applying SSP linking filter: " + str(len(observations.index))) observations = PPLinkingFilter( observations, - configs["SSP_detection_efficiency"], - configs["SSP_number_observations"], - configs["SSP_number_tracklets"], - configs["SSP_track_window"], - configs["SSP_separation_threshold"], - configs["SSP_maximum_time"], - configs["SSP_night_start_utc"], - drop_unlinked=configs["drop_unlinked"], + sconfigs.linkingfilter.ssp_detection_efficiency, + sconfigs.linkingfilter.ssp_number_observations, + sconfigs.linkingfilter.ssp_number_tracklets, + sconfigs.linkingfilter.ssp_track_window, + sconfigs.linkingfilter.ssp_separation_threshold, + sconfigs.linkingfilter.ssp_maximum_time, + sconfigs.linkingfilter.ssp_night_start_utc, + drop_unlinked=sconfigs.linkingfilter.drop_unlinked, ) observations.reset_index(drop=True, inplace=True) verboselog("Number of rows AFTER applying SSP linking filter: " + str(len(observations.index))) @@ -324,17 +332,17 @@ def runLSSTSimulation(args, configs): if len(observations.index) > 0: pplogger.info("Post processing completed for this chunk") pplogger.info("Outputting results for this chunk") - PPWriteOutput(args, configs, observations, verbose=args.verbose) + PPWriteOutput(args, sconfigs, observations, verbose=args.verbose) if args.stats is not None: - stats(observations, args.stats, args.outpath, configs) + stats(observations, args.stats, args.outpath, sconfigs) else: verboselog("No observations left in chunk. 
No output will be written for this chunk.")
-        startChunk = startChunk + configs["size_serial_chunk"]
+        startChunk = startChunk + sconfigs.input.size_serial_chunk
         loopCounter = loopCounter + 1
         # end for
-    if configs["output_format"] == "sqlite3" and os.path.isfile(
+    if sconfigs.output.output_format == "sqlite3" and os.path.isfile(
         os.path.join(args.outpath, args.outfilestem + ".db")
     ):
         pplogger.info("Indexing output SQLite database...")
diff --git a/src/sorcha/utilities/diffTestUtils.py b/src/sorcha/utilities/diffTestUtils.py
index 40e7c559..06bc761a 100644
--- a/src/sorcha/utilities/diffTestUtils.py
+++ b/src/sorcha/utilities/diffTestUtils.py
@@ -8,6 +8,7 @@ from sorcha.utilities.dataUtilitiesForTests import get_demo_filepath, get_test_filepath
 from sorcha.utilities.sorchaArguments import sorchaArguments
 from sorcha.modules.PPConfigParser import PPConfigFileParser
+from sorcha.utilities.sorchaConfigs import sorchaConfigs
 
 
 def compare_result_files(test_output, golden_output):
@@ -147,6 +148,6 @@ def override_seed_and_run(outpath, arg_set="baseline"):
     # Override the random number generator seed.
     # WARNING: This is only acceptable in a test and should never be used for
     # science results.
-    configs = PPConfigFileParser(args.configfile, args.surveyname)
+    configs = sorchaConfigs(args.configfile, args.surveyname)
     args._rngs = PerModuleRNG(2023)
     runLSSTSimulation(args, configs)
diff --git a/src/sorcha/utilities/sorchaConfigs.py b/src/sorcha/utilities/sorchaConfigs.py
new file mode 100644
index 00000000..40ee694f
--- /dev/null
+++ b/src/sorcha/utilities/sorchaConfigs.py
@@ -0,0 +1,1319 @@
+from dataclasses import dataclass
+import configparser
+import logging
+import sys
+import os
+import numpy as np
+from sorcha.lightcurves.lightcurve_registration import LC_METHODS
+from sorcha.activity.activity_registration import CA_METHODS
+from sorcha.modules.PPConfigParser import PPFindFileOrExit  # used by fovConfigs._camera_footprint below
+
+
+@dataclass
+class inputConfigs:
+    """Data class for holding INPUTS section configuration file keys and validating them."""
+
+    ephemerides_type: str = None
+    """Simulation used for ephemeris input."""
+
+    eph_format: str = None
+    """Format for ephemeris simulation input file."""
+
+    size_serial_chunk: int = None
+    """Sorcha chunk size."""
+
+    aux_format: str = None
+    """Format for the auxiliary input files."""
+
+    pointing_sql_query: str = None
+    """SQL query for extracting data from pointing database."""
+
+    def __post_init__(self):
+        """Automagically validates the input configs after initialisation."""
+        self._validate_input_configs()
+
+    def _validate_input_configs(self):
+        """
+        Validates the input config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+        # make sure all the mandatory keys have been populated.
+        check_key_exists(self.ephemerides_type, "ephemerides_type")
+        check_key_exists(self.eph_format, "eph_format")
+        check_key_exists(self.size_serial_chunk, "size_serial_chunk")
+        check_key_exists(self.aux_format, "aux_format")
+        check_key_exists(self.pointing_sql_query, "pointing_sql_query")
+
+        # some additional checks to make sure they all make sense!
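+        # (Note: check_key_exists/check_key_doesnt_exist, check_value_in_list
+        # and the cast_as_* helpers used throughout these validators are assumed
+        # to be module-level functions defined further down this file; on a
+        # failed check they log the error and call sys.exit rather than raising.)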
+        check_value_in_list(self.ephemerides_type, ["ar", "external"], "ephemerides_type")
+        check_value_in_list(self.eph_format, ["csv", "whitespace", "hdf5"], "eph_format")
+        check_value_in_list(self.aux_format, ["comma", "whitespace", "csv"], "aux_format")
+        self.size_serial_chunk = cast_as_int(self.size_serial_chunk, "size_serial_chunk")
+
+
+@dataclass
+class simulationConfigs:
+    """Data class for holding SIMULATION section configuration file keys and validating them"""
+
+    ar_ang_fov: float = None
+    """The field of view of our search field, in degrees."""
+
+    ar_fov_buffer: float = None
+    """The buffer zone around the field of view we want to include, in degrees."""
+
+    ar_picket: float = None
+    """An imprecise discretization of time that allows us to progress our simulations forward without getting more granular than we have to. The unit is number of days."""
+
+    ar_obs_code: str = None
+    """The MPC observatory code for the provided telescope."""
+
+    ar_healpix_order: int = None
+    """The HEALPix order which we will use for the healpy portions of the code."""
+
+    _ephemerides_type: str = None
+    """Simulation used for ephemeris input."""
+
+    def __post_init__(self):
+        """Automagically validates the simulation configs after initialisation."""
+        self._validate_simulation_configs()
+
+    def _validate_simulation_configs(self):
+        """
+        Validates the simulation config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+        # make sure all the mandatory keys have been populated.
+        check_key_exists(self._ephemerides_type, "_ephemerides_type")
+        check_value_in_list(self._ephemerides_type, ["ar", "external"], "_ephemerides_type")
+        if self._ephemerides_type == "ar":
+            check_key_exists(self.ar_ang_fov, "ar_ang_fov")
+            check_key_exists(self.ar_fov_buffer, "ar_fov_buffer")
+            check_key_exists(self.ar_picket, "ar_picket")
+            check_key_exists(self.ar_obs_code, "ar_obs_code")
+            check_key_exists(self.ar_healpix_order, "ar_healpix_order")
+
+            # some additional checks to make sure they all make sense!
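+            # (Worked example of why the casts below are needed: configparser
+            # hands every value over as a string, so an INI entry such as
+            # "ar_healpix_order = 6" arrives here as "6"; cast_as_int converts
+            # it so that nside = 2**ar_healpix_order in the ephemeris driver
+            # evaluates to 64 rather than raising a TypeError.)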
+            self.ar_ang_fov = cast_as_float(self.ar_ang_fov, "ar_ang_fov")
+            self.ar_fov_buffer = cast_as_float(self.ar_fov_buffer, "ar_fov_buffer")
+            self.ar_picket = cast_as_int(self.ar_picket, "ar_picket")
+            self.ar_healpix_order = cast_as_int(self.ar_healpix_order, "ar_healpix_order")
+        elif self._ephemerides_type == "external":
+            # make sure that when these are not needed they are not populated
+            check_key_doesnt_exist(self.ar_ang_fov, "ar_ang_fov", "but ephemerides type is external")
+            check_key_doesnt_exist(self.ar_fov_buffer, "ar_fov_buffer", "but ephemerides type is external")
+            check_key_doesnt_exist(self.ar_picket, "ar_picket", "but ephemerides type is external")
+            check_key_doesnt_exist(self.ar_obs_code, "ar_obs_code", "but ephemerides type is external")
+            check_key_doesnt_exist(
+                self.ar_healpix_order, "ar_healpix_order", "but ephemerides type is external"
+            )
+
+
+@dataclass
+class filtersConfigs:
+    """Data class for holding FILTERS section configuration file keys and validating them"""
+
+    observing_filters: str = None
+    """Filters of the observations you are interested in, comma-separated."""
+
+    survey_name: str = None
+    """Survey name, used to check that the chosen filters are valid."""
+
+    mainfilter: str = None
+    """Main filter chosen in the physical parameters file."""
+
+    othercolours: str = None
+    """Other filters given alongside the main filter."""
+
+    def __post_init__(self):
+        """Automagically validates the filters configs after initialisation."""
+        self._validate_filters_configs()
+
+    def _validate_filters_configs(self):
+        """
+        Validates the filters config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+        # checks mandatory keys are populated
+        check_key_exists(self.observing_filters, "observing_filters")
+        check_key_exists(self.survey_name, "survey_name")
+        self.observing_filters = [e.strip() for e in self.observing_filters.split(",")]
+        self._check_for_correct_filters()
+
+    def _check_for_correct_filters(self):
+        """
+        Checks the filters selected are used by the chosen survey.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+
+        if self.survey_name in ["rubin_sim", "RUBIN_SIM", "LSST", "lsst"]:
+            lsst_filters = ["u", "g", "r", "i", "z", "y"]
+            filters_ok = all(elem in lsst_filters for elem in self.observing_filters)
+
+            if not filters_ok:
+                bad_list = np.setdiff1d(self.observing_filters, lsst_filters)
+                logging.error(
+                    "ERROR: Filter(s) {} given in config file are not recognised filters for {} survey.".format(
+                        bad_list, self.survey_name
+                    )
+                )
+                logging.error("Accepted {} filters: {}".format("LSST", lsst_filters))
+                logging.error("Change observing_filters in config file or select another survey.")
+                sys.exit(
+                    "ERROR: Filter(s) {} given in config file are not recognised filters for {} survey.".format(
+                        bad_list, self.survey_name
+                    )
+                )
+
+
+@dataclass
+class saturationConfigs:
+    """Data class for holding SATURATION section configuration file keys and validating them"""
+
+    bright_limit_on: bool = None
+
+    bright_limit: float = None
+    """Upper magnitude limit on sources that will overfill the detector pixels or have counts above the non-linearity regime of the pixels, where one can’t do photometry. Objects brighter than this limit (in magnitude) will be cut.
""" + + _observing_filters: list = None + """Filters of the observations you are interested in, comma-separated.""" + + def __post_init__(self): + """Automagically validates the saturation configs after initialisation.""" + self._validate_saturation_configs() + + def _validate_saturation_configs(self): + """ + Validates the saturation config attributes after initialisation. + + Parameters + ----------- + None. + + Returns + ---------- + None + """ + check_key_exists(self._observing_filters, "_observing_filters") + if self.bright_limit is not None: + self.bright_limit_on = True + + if self.bright_limit_on: + try: + self.bright_limit = [float(e.strip()) for e in self.bright_limit.split(",")] + except ValueError: + logging.error("ERROR: could not parse brightness limits. Check formatting and try again.") + sys.exit("ERROR: could not parse brightness limits. Check formatting and try again.") + if len(self.bright_limit) == 1: + # when only one value is given that value is saved as a float instead of in a list + self.bright_limit = cast_as_float(self.bright_limit[0], "bright_limit") + elif len(self.bright_limit) != 1 and len(self.bright_limit) != len(self._observing_filters): + logging.error( + "ERROR: list of saturation limits is not the same length as list of observing filters." + ) + sys.exit( + "ERROR: list of saturation limits is not the same length as list of observing filters." + ) + + +@dataclass +class phasecurvesConfigs: + """Data class for holding PHASECURVES section configuration file keys and validating them""" + + phase_function: str = None + """The phase function used to calculate apparent magnitude. The physical parameters input""" + + def __post_init__(self): + """Automagically validates the phasecurve configs after initialisation.""" + self._validate_phasecurve_configs() + + def _validate_phasecurve_configs(self): + """ + Validates the phasecurve config attributes after initialisation. + + Parameters + ----------- + None. + + Returns + ---------- + None + """ + # make sure all the mandatory keys have been populated. + check_key_exists(self.phase_function, "phase_function") + + check_value_in_list(self.phase_function, ["HG", "HG1G2", "HG12", "linear", "none"], "phase_function") + + +@dataclass +class fovConfigs: + """Data class for holding FOV section configuration file keys and validating them""" + + camera_model: str = None + """Choose between circular or actual camera footprint, including chip gaps.""" + + footprint_path: str = None + """Path to camera footprint file. Uncomment to provide a path to the desired camera detector configuration file if not using the default built-in LSSTCam detector configuration for the actual camera footprint.""" + + fill_factor: str = None + """Fraction of detector surface area which contains CCD -- simulates chip gaps for OIF output. Comment out if using camera footprint.""" + + circle_radius: float = None + """Radius of the circle for a circular footprint (in degrees). Float. Comment out or do not include if using footprint camera model.""" + + footprint_edge_threshold: float = None + """The distance from the edge of a detector (in arcseconds on the focal plane) at which we will not correctly extract an object. By default this is 10px or 2 arcseconds. 
Comment out or do not include if not using footprint camera model.""" + + survey_name: str = None + """name of survey""" + + def __post_init__(self): + """Automagically validates the fov configs after initialisation.""" + self._validate_fov_configs() + + def _validate_fov_configs(self): + """ + Validates the fov config attributes after initialisation. + + Parameters + ----------- + None. + + Returns + ---------- + None + """ + check_key_exists(self.camera_model, "camera_model") + check_value_in_list(self.camera_model, ["circle", "footprint", "none"], "camera_model") + + if self.camera_model == "footprint": + self._camera_footprint() + + elif self.camera_model == "circle": + self._camera_circle() + + def _camera_footprint(self): + """ + Validates the fov config attributes for a footprint camera model. + + Parameters + ----------- + None. + + Returns + ---------- + None + """ + if self.footprint_path is not None: + PPFindFileOrExit(self.footprint_path, "footprint_path") + elif self.survey_name.lower() not in ["lsst", "rubin_sim"]: + logging.error( + "ERROR: a default detector footprint is currently only provided for LSST; please provide your own footprint file." + ) + sys.exit( + "ERROR: a default detector footprint is currently only provided for LSST; please provide your own footprint file." + ) + if self.footprint_edge_threshold is not None: + self.footprint_edge_threshold = cast_as_float( + self.footprint_edge_threshold, "footprint_edge_threshold" + ) + check_key_doesnt_exist(self.fill_factor, "fill_factor", 'but camera model is not "circle".') + check_key_doesnt_exist(self.circle_radius, "circle_radius", 'but camera model is not "circle".') + + def _camera_circle(self): + """ + Validates the fov config attributes for a circle camera model. + + Parameters + ----------- + None. + + Returns + ---------- + None + """ + if self.fill_factor is not None: + self.fill_factor = cast_as_float(self.fill_factor, "fill_factor") + if self.fill_factor < 0.0 or self.fill_factor > 1.0: + logging.error("ERROR: fill_factor out of bounds. Must be between 0 and 1.") + sys.exit("ERROR: fill_factor out of bounds. Must be between 0 and 1.") + + if self.circle_radius is not None: + self.circle_radius = cast_as_float(self.circle_radius, "circle_radius") + if self.circle_radius < 0.0: + logging.error("ERROR: circle_radius is negative.") + sys.exit("ERROR: circle_radius is negative.") + + if self.fill_factor is None and self.circle_radius is None: + logging.error( + 'ERROR: either "fill_factor" or "circle_radius" must be specified for circular footprint.' + ) + sys.exit( + 'ERROR: either "fill_factor" or "circle_radius" must be specified for circular footprint.' + ) + check_key_doesnt_exist( + self.footprint_edge_threshold, "footprint_edge_threshold", 'but camera model is not "footprint".' + ) + + +@dataclass +class fadingfunctionConfigs: + """Data class for holding FADINGFUNCTION section configuration file keys and validating them""" + + fading_function_on: bool = None + """Detection efficiency fading function on or off.""" + + fading_function_width: float = None + """Width parameter for fading function. 
Should be greater than zero and less than 0.5."""
+
+    fading_function_peak_efficiency: float = None
+    """Peak efficiency for the fading function, called the 'fill factor' in Chesley and Veres (2017)."""
+
+    def __post_init__(self):
+        """Automagically validates the fading function configs after initialisation."""
+        self._validate_fadingfunction_configs()
+
+    def _validate_fadingfunction_configs(self):
+        """
+        Validates the fadingfunction config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+
+        # make sure all the mandatory keys have been populated.
+        check_key_exists(self.fading_function_on, "fading_function_on")
+        self.fading_function_on = cast_as_bool(self.fading_function_on, "fading_function_on")
+
+        if self.fading_function_on:
+            # when fading_function_on is True, fading_function_width and fading_function_peak_efficiency become mandatory
+            check_key_exists(self.fading_function_width, "fading_function_width")
+            check_key_exists(self.fading_function_peak_efficiency, "fading_function_peak_efficiency")
+            self.fading_function_width = cast_as_float(self.fading_function_width, "fading_function_width")
+            self.fading_function_peak_efficiency = cast_as_float(
+                self.fading_function_peak_efficiency, "fading_function_peak_efficiency"
+            )
+
+            # boundary conditions for both width and peak efficiency
+            if self.fading_function_width <= 0.0 or self.fading_function_width > 0.5:
+                logging.error(
+                    "ERROR: fading_function_width out of bounds. Must be greater than zero and less than 0.5."
+                )
+                sys.exit(
+                    "ERROR: fading_function_width out of bounds. Must be greater than zero and less than 0.5."
+                )
+
+            if self.fading_function_peak_efficiency < 0.0 or self.fading_function_peak_efficiency > 1.0:
+                logging.error(
+                    "ERROR: fading_function_peak_efficiency out of bounds. Must be between 0 and 1."
+                )
+                sys.exit("ERROR: fading_function_peak_efficiency out of bounds. Must be between 0 and 1.")
+
+        else:
+            # making sure these aren't populated when fading_function_on is False
+            check_key_doesnt_exist(
+                self.fading_function_width, "fading_function_width", "but fading_function_on is False."
+            )
+            check_key_doesnt_exist(
+                self.fading_function_peak_efficiency,
+                "fading_function_peak_efficiency",
+                "but fading_function_on is False.",
+            )
+
+
+@dataclass
+class linkingfilterConfigs:
+    """Data class for holding LINKINGFILTER section configuration file keys and validating them."""
+
+    ssp_linking_on: bool = None
+    """Flag controlling whether the SSP linking filter is run."""
+
+    drop_unlinked: bool = None
+    """Decides if unlinked objects will be dropped."""
+
+    ssp_detection_efficiency: float = None
+    """SSP detection efficiency. Which fraction of the observations of an object will the automated solar system processing pipeline successfully link? Float."""
+
+    ssp_number_observations: int = None
+    """Length of tracklets. How many observations of an object during one night are required to produce a valid tracklet?"""
+
+    ssp_separation_threshold: float = None
+    """Minimum separation (in arcsec) between two observations of an object required for the linking software to distinguish them as separate and therefore as a valid tracklet."""
+
+    ssp_maximum_time: float = None
+    """Maximum time separation (in days) between subsequent observations in a tracklet. Default is 0.0625 days (90 mins)."""
+
+    ssp_number_tracklets: int = None
+    """Number of tracklets for detection.
How many tracklets are required to classify an object as detected?"""
+
+    ssp_track_window: int = None
+    """The number of tracklets defined above must occur in <= this number of days to constitute a complete track/detection."""
+
+    ssp_night_start_utc: float = None
+    """The time in UTC at which it is noon at the observatory location (in standard time). For the LSST, 12pm Chile Standard Time is 4pm UTC."""
+
+    def __post_init__(self):
+        """Automagically validates the linking filter configs after initialisation."""
+        self._validate_linkingfilter_configs()
+
+    def _validate_linkingfilter_configs(self):
+        """
+        Validates the linkingfilter config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+
+        sspvariables = [
+            self.ssp_separation_threshold,
+            self.ssp_number_observations,
+            self.ssp_number_tracklets,
+            self.ssp_track_window,
+            self.ssp_detection_efficiency,
+            self.ssp_maximum_time,
+            self.ssp_night_start_utc,
+        ]
+
+        # the below if-statement explicitly checks for None so a zero triggers the correct error
+        if all(v is not None for v in sspvariables):
+
+            self.ssp_detection_efficiency = cast_as_float(
+                self.ssp_detection_efficiency, "ssp_detection_efficiency"
+            )
+            self.ssp_number_observations = cast_as_int(
+                self.ssp_number_observations, "ssp_number_observations"
+            )
+            self.ssp_separation_threshold = cast_as_float(
+                self.ssp_separation_threshold, "ssp_separation_threshold"
+            )
+            self.ssp_maximum_time = cast_as_float(self.ssp_maximum_time, "ssp_maximum_time")
+            self.ssp_number_tracklets = cast_as_int(self.ssp_number_tracklets, "ssp_number_tracklets")
+            self.ssp_track_window = cast_as_int(self.ssp_track_window, "ssp_track_window")
+            self.ssp_night_start_utc = cast_as_float(self.ssp_night_start_utc, "ssp_night_start_utc")
+            if self.ssp_number_observations < 1:
+                logging.error("ERROR: ssp_number_observations is zero or negative.")
+                sys.exit("ERROR: ssp_number_observations is zero or negative.")
+
+            if self.ssp_number_tracklets < 1:
+                logging.error("ERROR: ssp_number_tracklets is zero or less.")
+                sys.exit("ERROR: ssp_number_tracklets is zero or less.")
+
+            if self.ssp_track_window <= 0.0:
+                logging.error("ERROR: ssp_track_window is zero or negative.")
+                sys.exit("ERROR: ssp_track_window is zero or negative.")
+
+            if self.ssp_detection_efficiency > 1.0 or self.ssp_detection_efficiency < 0:
+                logging.error("ERROR: ssp_detection_efficiency out of bounds (should be between 0 and 1).")
+                sys.exit("ERROR: ssp_detection_efficiency out of bounds (should be between 0 and 1).")
+
+            if self.ssp_separation_threshold <= 0.0:
+                logging.error("ERROR: ssp_separation_threshold is zero or negative.")
+                sys.exit("ERROR: ssp_separation_threshold is zero or negative.")
+
+            if self.ssp_maximum_time < 0:
+                logging.error("ERROR: ssp_maximum_time is negative.")
+                sys.exit("ERROR: ssp_maximum_time is negative.")
+
+            if self.ssp_night_start_utc > 24.0 or self.ssp_night_start_utc < 0.0:
+                logging.error("ERROR: ssp_night_start_utc must be a valid time between 0 and 24 hours.")
+                sys.exit("ERROR: ssp_night_start_utc must be a valid time between 0 and 24 hours.")
+
+            self.ssp_linking_on = True
+        elif all(v is None for v in sspvariables):
+            self.ssp_linking_on = False
+        else:
+            logging.error(
+                "ERROR: only some ssp linking variables supplied. Supply all seven required variables for ssp linking filter, or none to turn filter off."
+            )
+            sys.exit(
+                "ERROR: only some ssp linking variables supplied.
Supply all seven required variables for ssp linking filter, or none to turn filter off."
+            )
+        self.drop_unlinked = cast_as_bool_or_set_default(self.drop_unlinked, "drop_unlinked", True)
+
+
+@dataclass
+class outputConfigs:
+    """Data class for holding OUTPUT section configuration file keys and validating them."""
+
+    output_format: str = None
+    """Output format of the output file(s)."""
+
+    output_columns: str = None
+    """Controls which columns are in the output files."""
+
+    position_decimals: float = None
+    """Number of decimal places to which position outputs are rounded."""
+
+    magnitude_decimals: float = None
+    """Number of decimal places to which magnitude outputs are rounded."""
+
+    def __post_init__(self):
+        """Automagically validates the output configs after initialisation."""
+        self._validate_output_configs()
+
+    def _validate_output_configs(self):
+        """
+        Validates the output config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+        # make sure all the mandatory keys have been populated.
+        check_key_exists(self.output_format, "output_format")
+        check_key_exists(self.output_columns, "output_columns")
+
+        # some additional checks to make sure they all make sense!
+        check_value_in_list(self.output_format, ["csv", "sqlite3", "hdf5"], "output_format")
+
+        if "," in self.output_columns:  # assume list of column names: turn into a list and strip whitespace
+            self.output_columns = [colname.strip(" ") for colname in self.output_columns.split(",")]
+        else:
+            check_value_in_list(self.output_columns, ["basic", "all"], "output_columns")
+        self._validate_decimals()
+
+    def _validate_decimals(self):
+        """
+        Validates the decimal output config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+        if self.position_decimals is not None:
+            self.position_decimals = cast_as_float(self.position_decimals, "position_decimals")
+        if self.magnitude_decimals is not None:
+            self.magnitude_decimals = cast_as_float(self.magnitude_decimals, "magnitude_decimals")
+        if self.position_decimals is not None and self.position_decimals < 0:
+            logging.error("ERROR: decimal places config variables cannot be negative.")
+            sys.exit("ERROR: decimal places config variables cannot be negative.")
+        if self.magnitude_decimals is not None and self.magnitude_decimals < 0:
+            logging.error("ERROR: decimal places config variables cannot be negative.")
+            sys.exit("ERROR: decimal places config variables cannot be negative.")
+
+
+@dataclass
+class lightcurveConfigs:
+    """Data class for holding LIGHTCURVE section configuration file keys and validating them."""
+
+    lc_model: str = None
+    """The unique name of the lightcurve model to use. Defined in the ``name_id`` method of the subclasses of AbstractLightCurve. If not none, the complex physical parameters file must be specified at the command line."""
+
+    def __post_init__(self):
+        """Automagically validates the lightcurve configs after initialisation."""
+        self._validate_lightcurve_configs()
+
+    def _validate_lightcurve_configs(self):
+        """
+        Validates the lightcurve config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+        self.lc_model = None if self.lc_model == "none" else self.lc_model
+        if self.lc_model is not None and self.lc_model not in LC_METHODS:
+            logging.error(
+                f"The requested light curve model, '{self.lc_model}', is not registered.
+                f"The requested light curve model, '{self.lc_model}', is not registered. Available lightcurve options are: {list(LC_METHODS.keys())}"
+            )
+            sys.exit(
+                f"The requested light curve model, '{self.lc_model}', is not registered. Available lightcurve options are: {list(LC_METHODS.keys())}"
+            )
+
+
+@dataclass
+class activityConfigs:
+    """Data class for holding Activity section configuration file keys and validating them."""
+
+    comet_activity: str = None
+    """The unique name of the activity model to use. Defined in the ``name_id`` method of the subclasses of AbstractCometaryActivity. If not none, a complex physical parameters file must be specified at the command line."""
+
+    def __post_init__(self):
+        """Automagically validates the activity configs after initialisation."""
+        self._validate_activity_configs()
+
+    def _validate_activity_configs(self):
+        """
+        Validates the activity config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+        self.comet_activity = None if self.comet_activity == "none" else self.comet_activity
+        if self.comet_activity is not None and self.comet_activity not in CA_METHODS:
+            logging.error(
+                f"The requested comet activity model, '{self.comet_activity}', is not registered. Available comet activity models are: {list(CA_METHODS.keys())}"
+            )
+            sys.exit(
+                f"The requested comet activity model, '{self.comet_activity}', is not registered. Available comet activity models are: {list(CA_METHODS.keys())}"
+            )
+
+
+@dataclass
+class expertConfigs:
+    """Data class for holding expert section configuration file keys and validating them."""
+
+    SNR_limit: float = None
+    """Drops observations with signal to noise ratio less than the given limit"""
+
+    SNR_limit_on: bool = None
+    """flag for when an SNR limit is given"""
+
+    mag_limit: float = None
+    """Drops observations fainter than the given magnitude limit"""
+
+    mag_limit_on: bool = None
+    """flag for when a magnitude limit is given"""
+
+    trailing_losses_on: bool = None
+    """flag for trailing losses"""
+
+    default_SNR_cut: bool = None
+    """flag for applying the default SNR < 2.0 cut"""
+
+    randomization_on: bool = None
+    """flag for randomizing astrometry and photometry"""
+
+    vignetting_on: bool = None
+    """flag for calculating effects of vignetting on limiting magnitude"""
+
+    def __post_init__(self):
+        """Automagically validates the expert configs after initialisation."""
+        self._validate_expert_configs()
+
+    def _validate_expert_configs(self):
+        """
+        Validates the expert config attributes after initialisation.
+
+        Parameters
+        -----------
+        None.
+
+        Returns
+        ----------
+        None
+        """
+        if self.SNR_limit is not None:
+            self.SNR_limit_on = True
+            if self.SNR_limit < 0:
+                logging.error("ERROR: SNR limit is negative.")
+                sys.exit("ERROR: SNR limit is negative.")
+        else:
+            self.SNR_limit_on = False
+
+        if self.mag_limit is not None:
+            self.mag_limit_on = True
+            if self.mag_limit < 0:
+                logging.error("ERROR: magnitude limit is negative.")
+                sys.exit("ERROR: magnitude limit is negative.")
+        else:
+            self.mag_limit_on = False
+
+        if self.mag_limit_on and self.SNR_limit_on:
+            logging.error(
+                "ERROR: SNR limit and magnitude limit are mutually exclusive. Please delete one or both from config file."
+            )
+            sys.exit(
+                "ERROR: SNR limit and magnitude limit are mutually exclusive. Please delete one or both from config file."
+            )
+
+        self.trailing_losses_on = cast_as_bool_or_set_default(
+            self.trailing_losses_on, "trailing_losses_on", True
+        )
+        self.default_SNR_cut = cast_as_bool_or_set_default(self.default_SNR_cut, "default_SNR_cut", True)
+        self.randomization_on = cast_as_bool_or_set_default(self.randomization_on, "randomization_on", True)
+        self.vignetting_on = cast_as_bool_or_set_default(self.vignetting_on, "vignetting_on", True)
+
+
+@dataclass
+class sorchaConfigs:
+    """Dataclass which stores configuration file keywords in dataclasses."""
+
+    input: inputConfigs = None
+    """inputConfigs dataclass which stores the keywords from the INPUT section of the config file."""
+
+    simulation: simulationConfigs = None
+    """simulationConfigs dataclass which stores the keywords from the SIMULATION section of the config file."""
+
+    filters: filtersConfigs = None
+    """filtersConfigs dataclass which stores the keywords from the FILTERS section of the config file."""
+
+    saturation: saturationConfigs = None
+    """saturationConfigs dataclass which stores the keywords from the SATURATION section of the config file."""
+
+    phasecurves: phasecurvesConfigs = None
+    """phasecurvesConfigs dataclass which stores the keywords from the PHASECURVES section of the config file."""
+
+    fov: fovConfigs = None
+    """fovConfigs dataclass which stores the keywords from the FOV section of the config file."""
+
+    fadingfunction: fadingfunctionConfigs = None
+    """fadingfunctionConfigs dataclass which stores the keywords from the FADINGFUNCTION section of the config file."""
+
+    linkingfilter: linkingfilterConfigs = None
+    """linkingfilterConfigs dataclass which stores the keywords from the LINKINGFILTER section of the config file."""
+
+    output: outputConfigs = None
+    """outputConfigs dataclass which stores the keywords from the OUTPUT section of the config file."""
+
+    lightcurve: lightcurveConfigs = None
+    """lightcurveConfigs dataclass which stores the keywords from the LIGHTCURVE section of the config file."""
+
+    activity: activityConfigs = None
+    """activityConfigs dataclass which stores the keywords from the ACTIVITY section of the config file."""
+
+    expert: expertConfigs = None
+    """expertConfigs dataclass which stores the keywords from the EXPERT section of the config file."""
+
+    pplogger: None = None
+    """The Python logger instance"""
+
+    survey_name: str = ""
+    """The name of the survey."""
+
+    # this __init__ overrides a dataclass's inbuilt __init__ because we want to populate this from a file, not explicitly ourselves
+    def __init__(self, config_file_location=None, survey_name=None):
+
+        # attach the logger object so we can print things to the Sorcha logs
+        self.pplogger = logging.getLogger(__name__)
+        self.survey_name = survey_name
+
+        if config_file_location:  # if a location to a config file is supplied...
+            # Save a raw copy of the configuration to the logs as a backup.
+            with open(config_file_location, "r") as file:
+                logging.info(f"Copy of configuration file {config_file_location}:\n{file.read()}")
+
+            config_object = configparser.ConfigParser()  # create a ConfigParser object
+            config_object.read(config_file_location)  # and read the whole config file into it
+            self._read_configs_from_object(
+                config_object
+            )  # now we call a function that populates the class attributes
+
+    def _read_configs_from_object(self, config_object):
+        """
+        Function that populates the class attributes.
+
+        Parameters
+        -----------
+        config_object: ConfigParser object
+            ConfigParser object that has the config file read into it
+
+        Returns
+        ----------
+        None
+
+        """
+
+        # list of sections and their corresponding config dataclasses
+        section_list = {
+            "INPUT": inputConfigs,
+            "SIMULATION": simulationConfigs,
+            "FILTERS": filtersConfigs,
+            "SATURATION": saturationConfigs,
+            "PHASECURVES": phasecurvesConfigs,
+            "FOV": fovConfigs,
+            "FADINGFUNCTION": fadingfunctionConfigs,
+            "LINKINGFILTER": linkingfilterConfigs,
+            "OUTPUT": outputConfigs,
+            "LIGHTCURVE": lightcurveConfigs,
+            "ACTIVITY": activityConfigs,
+            "EXPERT": expertConfigs,
+        }
+        # when adding a new section to the config file, this general function needs the section name in uppercase
+        # to match the attribute defined above in lowercase, e.g. section INPUT has attribute input
+        # general loop that reads each config file section into its config dataclass
+        for section, config_section in section_list.items():
+            if config_object.has_section(section):
+                extra_args = {}
+                if section == "SIMULATION":
+                    extra_args["_ephemerides_type"] = self.input.ephemerides_type
+                elif section == "FILTERS":
+                    extra_args["survey_name"] = self.survey_name
+                elif section == "SATURATION":
+                    extra_args["_observing_filters"] = self.filters.observing_filters
+                elif section == "FOV":
+                    extra_args["survey_name"] = self.survey_name
+                section_dict = dict(config_object[section])
+                config_instance = config_section(**section_dict, **extra_args)
+
+            else:
+                config_instance = config_section()  # if section not in config file take default values
+            section_key = section.lower()
+            setattr(self, section_key, config_instance)
+
+
+## below are the utility functions used to help validate the keywords, add more as needed
+
+
+def check_key_exists(value, key_name):
+    """
+    Checks to confirm that a mandatory config file value is present and has been read into the dataclass (i.e. is not None). Logs an error and exits if the value is None.
+
+    Parameters
+    -----------
+    value : object attribute
+        value of the config file attribute
+
+    key_name : string
+        The key being checked.
+
+    Returns
+    ----------
+    None.
+
+    """
+
+    if value is None:
+        logging.error(
+            f"ERROR: No value found for required key {key_name} in config file. Please check the file and try again."
+        )
+        sys.exit(
+            f"ERROR: No value found for required key {key_name} in config file. Please check the file and try again."
+        )
+
+
+def check_key_doesnt_exist(value, key_name, reason):
+    """
+    Checks to confirm that a config file value is absent, i.e. has been read into the dataclass as None. Logs an error and exits if the value is not None.
+
+    Parameters
+    -----------
+    value : object attribute
+        value of the config file attribute
+
+    key_name : string
+        The key being checked.
+
+    reason : string
+        reason given in the error message on why this value shouldn't be in the config file
+
+    Returns
+    ----------
+    None.
+ """ + + # checks to make sure value doesn't exist + if value is not None: + logging.error(f"ERROR: {key_name} supplied in config file {reason}") + sys.exit(f"ERROR: {key_name} supplied in config file {reason}") + + +def cast_as_int(value, key): + # replaces PPGetIntOrExit: checks to make sure the value can be cast as an integer. + """ + Checks to see if value can be cast as an interger. + + Parameters + ----------- + value : object attribute + value of the config file attribute + + key : string + The key being checked. + Returns + ---------- + value as an integer + + """ + + try: + int(value) + except ValueError: + logging.error(f"ERROR: expected an int for config parameter {key}. Check value in config file.") + sys.exit(f"ERROR: expected an int for config parameter {key}. Check value in config file.") + + return int(value) + + +def cast_as_float(value, key): + # replaces PPGetFloatOrExit: checks to make sure the value can be cast as a float. + """ + Checks to see if value can be cast as a float. + + Parameters + ----------- + value : object attribute + value of the config file attribute + + key : string + The key being checked. + Returns + ---------- + value as a float + + """ + + try: + float(value) + except ValueError: + logging.error(f"ERROR: expected a float for config parameter {key}. Check value in config file.") + sys.exit(f"ERROR: expected a float for config parameter {key}. Check value in config file.") + + return float(value) + + +def cast_as_bool(value, key): + # replaces PPGetBoolOrExit: checks to make sure the value can be cast as a bool. + """ + Checks to see if value can be cast as a boolen. + + Parameters + ----------- + value : object attribute + value of the config file attribute + + key : string + The key being checked. + Returns + ---------- + value as a boolen + """ + + str_value = str(value).strip() + + if str_value in ["true", "1", "yes", "y", "True"]: + return True + elif str_value in ["false", "0", "no", "n", "False"]: + return False + else: + logging.error(f"ERROR: expected a bool for config parameter {key}. Check value in config file.") + sys.exit(f"ERROR: expected a bool for config parameter {key}. Check value in config file.") + + +def check_value_in_list(value, valuelist, key): + # PPConfigParser often checks to see if a config variable is in a list of permissible variables, so this abstracts it out. + """ + Checks to see if a config variable is in a list of permissible variables. + + Parameters + ----------- + value : object attribute + value of the config file value + + valuelist: list + list of permissible values for attribute + + key : string + The key being checked. + Returns + ---------- + None. + + """ + + if value not in valuelist: + logging.error( + f"ERROR: value {value} for config parameter {key} not recognised. Expecting one of: {valuelist}." + ) + sys.exit( + f"ERROR: value {value} for config parameter {key} not recognised. Expecting one of: {valuelist}." + ) + + +def PPFindFileOrExit(arg_fn, argname): + """Checks to see if a file given by a filename exists. If it doesn't, + this fails gracefully and exits to the command line. + + Parameters + ----------- + arg_fn : string + The filepath/name of the file to be checked. + + argname : string + The name of the argument being checked. Used for error message. + + Returns + ---------- + arg_fn : string + The filepath/name of the file to be checked. 
+
+    """
+
+    pplogger = logging.getLogger(__name__)
+
+    if os.path.exists(arg_fn):
+        return arg_fn
+    else:
+        pplogger.error("ERROR: filename {} supplied for {} argument does not exist.".format(arg_fn, argname))
+        sys.exit("ERROR: filename {} supplied for {} argument does not exist.".format(arg_fn, argname))
+
+
+def cast_as_bool_or_set_default(value, key, default):
+
+    # replaces PPGetBoolOrExit: checks to make sure the value can be cast as a bool.
+    """
+    Checks to see if value can be cast as a boolean and, if not set (equals None), returns the default bool.
+
+    Parameters
+    -----------
+    value : object attribute
+        value of the config file attribute
+
+    key : string
+        The key being checked.
+
+    default : bool
+        default bool if value is None
+
+    Returns
+    ----------
+    value as a boolean
+    """
+
+    if value is not None:
+
+        str_value = str(value).strip()
+
+        if str_value in ["true", "1", "yes", "y", "True"]:
+            return True
+        elif str_value in ["false", "0", "no", "n", "False"]:
+            return False
+        else:
+            logging.error(f"ERROR: expected a bool for config parameter {key}. Check value in config file.")
+            sys.exit(f"ERROR: expected a bool for config parameter {key}. Check value in config file.")
+    elif value is None:
+        return default
+
+
+def PrintConfigsToLog(sconfigs, cmd_args):
+    """
+    Prints all the values from the config file and command line to the log.
+
+    Parameters
+    -----------
+    sconfigs : dataclass
+        Dataclass of config file variables.
+
+    cmd_args : dictionary
+        Dictionary of command line arguments.
+
+    Returns
+    ----------
+    None.
+
+    """
+    pplogger = logging.getLogger(__name__)
+
+    pplogger.info("The config file used is located at " + cmd_args.configfile)
+    pplogger.info("The physical parameters file used is located at " + cmd_args.paramsinput)
+    pplogger.info("The orbits file used is located at " + cmd_args.orbinfile)
+    if cmd_args.input_ephemeris_file:
+        pplogger.info("The ephemerides file used is located at " + cmd_args.input_ephemeris_file)
+    if cmd_args.output_ephemeris_file:
+        pplogger.info("The output ephemerides file is located at " + cmd_args.output_ephemeris_file)
+    pplogger.info("The survey selected is: " + cmd_args.surveyname)
+
+    if sconfigs.activity.comet_activity == "comet":
+        pplogger.info("Cometary activity set to: " + str(sconfigs.activity.comet_activity))
+    elif sconfigs.activity.comet_activity == "none":
+        pplogger.info("No cometary activity selected.")
+
+    pplogger.info("Format of ephemerides file is: " + sconfigs.input.eph_format)
+    pplogger.info("Format of auxiliary files is: " + sconfigs.input.aux_format)
+
+    pplogger.info("Pointing database path is: " + cmd_args.pointing_database)
+    pplogger.info("Pointing database required query is: " + sconfigs.input.pointing_sql_query)
+
+    pplogger.info(
+        "The number of objects processed in a single chunk is: " + str(sconfigs.input.size_serial_chunk)
+    )
+    pplogger.info("The main filter in which H is defined is " + sconfigs.filters.mainfilter)
+    rescs = " ".join(str(f) for f in sconfigs.filters.observing_filters)
+    pplogger.info("The filters included in the post-processing results are " + rescs)
+
+    if sconfigs.filters.othercolours:
+        othcs = " ".join(str(e) for e in sconfigs.filters.othercolours)
+        pplogger.info("Thus, the colour indices included in the simulation are " + othcs)
+
+    pplogger.info(
+        "The apparent brightness is calculated using the following phase function model: "
+        + sconfigs.phasecurves.phase_function
+    )
+
+    if sconfigs.expert.trailing_losses_on:
+        pplogger.info("Computation of trailing losses is switched ON.")
+    else:
+        pplogger.info("Computation of trailing losses is switched OFF.")
+
+    if sconfigs.expert.randomization_on:
+        pplogger.info("Randomization of position and magnitude around uncertainties is switched ON.")
+    else:
+        pplogger.info("Randomization of position and magnitude around uncertainties is switched OFF.")
+
+    if sconfigs.expert.vignetting_on:
+        pplogger.info("Vignetting is switched ON.")
+    else:
+        pplogger.info("Vignetting is switched OFF.")
+
+    if sconfigs.fov.camera_model == "footprint":
+        pplogger.info("Footprint is modelled after the actual camera footprint.")
+        if sconfigs.fov.footprint_path:
+            pplogger.info("Loading camera footprint from " + sconfigs.fov.footprint_path)
+        else:
+            pplogger.info("Loading default LSST footprint LSST_detector_corners_100123.csv")
+    elif sconfigs.fov.camera_model == "circle":
+        pplogger.info("Footprint is circular.")
+        if sconfigs.fov.fill_factor:
+            pplogger.info(
+                "The code will approximate chip gaps using filling factor: " + str(sconfigs.fov.fill_factor)
+            )
+        elif sconfigs.fov.circle_radius:
+            pplogger.info(
+                "A circular footprint will be applied with radius: " + str(sconfigs.fov.circle_radius)
+            )
+    else:
+        pplogger.info("Camera footprint is turned OFF.")
+
+    if sconfigs.saturation.bright_limit_on:
+        pplogger.info("The upper saturation limit(s) is/are: " + str(sconfigs.saturation.bright_limit))
+    else:
+        pplogger.info("Saturation limit is turned OFF.")
+
+    if sconfigs.expert.SNR_limit_on:
+        pplogger.info("The lower SNR limit is: " + str(sconfigs.expert.SNR_limit))
+    else:
+        pplogger.info("SNR limit is turned OFF.")
+
+    if sconfigs.expert.default_SNR_cut:
+        pplogger.info("Default SNR cut is ON. All observations with SNR < 2.0 will be removed.")
+
+    if sconfigs.expert.mag_limit_on:
+        pplogger.info("The magnitude limit is: " + str(sconfigs.expert.mag_limit))
+    else:
+        pplogger.info("Magnitude limit is turned OFF.")
+
+    if sconfigs.fadingfunction.fading_function_on:
+        pplogger.info("The detection efficiency fading function is ON.")
+        pplogger.info(
+            "The width parameter of the fading function has been set to: "
+            + str(sconfigs.fadingfunction.fading_function_width)
+        )
+        pplogger.info(
+            "The peak efficiency of the fading function has been set to: "
+            + str(sconfigs.fadingfunction.fading_function_peak_efficiency)
+        )
+    else:
+        pplogger.info("The detection efficiency fading function is OFF.")
+
+    if sconfigs.linkingfilter.ssp_linking_on:
+        pplogger.info("Solar System Processing linking filter is turned ON.")
+        pplogger.info("For SSP linking...")
+        pplogger.info(
+            "...the fractional detection efficiency is: "
+            + str(sconfigs.linkingfilter.ssp_detection_efficiency)
+        )
+        pplogger.info(
+            "...the minimum required number of observations in a tracklet is: "
+            + str(sconfigs.linkingfilter.ssp_number_observations)
+        )
+        pplogger.info(
+            "...the minimum required number of tracklets to form a track is: "
+            + str(sconfigs.linkingfilter.ssp_number_tracklets)
+        )
+        pplogger.info(
+            "...the maximum time window in days in which the tracklets must occur to form a track is: "
+            + str(sconfigs.linkingfilter.ssp_track_window)
+        )
+        pplogger.info(
+            "...the minimum angular separation between observations in arcseconds is: "
+            + str(sconfigs.linkingfilter.ssp_separation_threshold)
+        )
+        pplogger.info(
+            "...the maximum temporal separation between subsequent observations in a tracklet in days is: "
+            + str(sconfigs.linkingfilter.ssp_maximum_time)
+        )
+        if not sconfigs.linkingfilter.drop_unlinked:
+            pplogger.info("Unlinked objects will not be dropped.")
+    else:
+
pplogger.info("Solar System Processing linking filter is turned OFF.") + + if sconfigs.input.ephemerides_type == "ar": + pplogger.info("ASSIST+REBOUND Simulation is turned ON.") + pplogger.info("For ASSIST+REBOUND...") + pplogger.info("...the field's angular FOV is: " + str(sconfigs.simulation.ar_ang_fov)) + pplogger.info("...the buffer around the FOV is: " + str(sconfigs.simulation.ar_fov_buffer)) + pplogger.info("...the picket interval is: " + str(sconfigs.simulation.ar_picket)) + pplogger.info("...the observatory code is: " + str(sconfigs.simulation.ar_obs_code)) + pplogger.info("...the healpix order is: " + str(sconfigs.simulation.ar_healpix_order)) + else: + pplogger.info("ASSIST+REBOUND Simulation is turned OFF.") + + if sconfigs.lightcurve.lc_model: + pplogger.info("A lightcurve model is being applied.") + pplogger.info("The lightcurve model is: " + sconfigs.lightcurve.lc_model) + else: + pplogger.info("No lightcurve model is being applied.") + + pplogger.info( + "Output files will be saved in path: " + cmd_args.outpath + " with filestem " + cmd_args.outfilestem + ) + pplogger.info("Output files will be saved as format: " + sconfigs.output.output_format) + pplogger.info( + "In the output, positions will be rounded to " + + str(sconfigs.output.position_decimals) + + " decimal places." + ) + pplogger.info( + "In the output, magnitudes will be rounded to " + + str(sconfigs.output.magnitude_decimals) + + " decimal places." + ) + if isinstance(sconfigs.output.output_columns, list): + pplogger.info("The output columns are set to: " + " ".join(sconfigs.output.output_columns)) + else: + pplogger.info("The output columns are set to: " + sconfigs.output.output_columns) diff --git a/src/sorcha_cmdline/run.py b/src/sorcha_cmdline/run.py index 5c6a32ac..b79767fc 100644 --- a/src/sorcha_cmdline/run.py +++ b/src/sorcha_cmdline/run.py @@ -138,6 +138,7 @@ def execute(args): PPConfigFileParser, runLSSTSimulation, sorchaArguments, + sorchaConfigs, update_activity_subclasses, update_lc_subclasses, ) @@ -156,21 +157,21 @@ def execute(args): # Extract and validate the remaining arguments. 
cmd_args = PPCommandLineParser(args) pplogger.info("Reading configuration file...") - configs = PPConfigFileParser(cmd_args["configfile"], cmd_args["surveyname"]) + # configs = PPConfigFileParser(cmd_args["configfile"], cmd_args["surveyname"]) + sconfigs = sorchaConfigs(cmd_args["configfile"], cmd_args["surveyname"]) pplogger.info("Configuration file read.") - if configs["ephemerides_type"] == "external" and cmd_args["input_ephemeris_file"] is None: + if sconfigs.input.ephemerides_type == "external" and cmd_args["input_ephemeris_file"] is None: pplogger.error("ERROR: A+R simulation not enabled and no ephemerides file provided") sys.exit("ERROR: A+R simulation not enabled and no ephemerides file provided") - if configs["lc_model"] and cmd_args["complex_physical_parameters"] is None: + if sconfigs.lightcurve.lc_model and cmd_args["complex_physical_parameters"] is None: pplogger.error("ERROR: No complex physical parameter file provided for light curve model") sys.exit("ERROR: No complex physical parameter file provided for light curve model") - if configs["comet_activity"] and cmd_args["complex_physical_parameters"] is None: + if sconfigs.activity.comet_activity and cmd_args["complex_physical_parameters"] is None: pplogger.error("ERROR: No complex physical parameter file provided for comet activity model") sys.exit("ERROR: No complex physical parameter file provided for comet activity model") - if "SORCHA_SEED" in os.environ: cmd_args["seed"] = int(os.environ["SORCHA_SEED"]) pplogger.info(f"Random seed overridden via environmental variable, SORCHA_SEED={cmd_args['seed']}") @@ -186,7 +187,7 @@ def execute(args): except Exception as err: pplogger.error(err) sys.exit(err) - runLSSTSimulation(args, configs) + runLSSTSimulation(args, sconfigs) elif cmd_args["surveyname"] in ["LSST", "lsst"]: pplogger.error( "ERROR: The LSST has not started yet Current allowed surveys are: {}".format( diff --git a/tests/data/PPConfig_goldens_test.ini b/tests/data/PPConfig_goldens_test.ini index b050eb12..f5089992 100644 --- a/tests/data/PPConfig_goldens_test.ini +++ b/tests/data/PPConfig_goldens_test.ini @@ -119,22 +119,22 @@ SSP_night_start_utc = 16.0 # Configs for running the ASSIST+REBOUND ephemerides generator. # the field of view of our search field, in degrees -ar_ang_fov = 1.8 +#ar_ang_fov = 1.8 # the buffer zone around the field of view we want to include, in degrees -ar_fov_buffer = 0.2 +#ar_fov_buffer = 0.2 # the "picket" is our imprecise discretization of time that allows us to move progress # our simulations forward without getting too granular when we don't have to. # the unit is number of days. -ar_picket = 1 +#ar_picket = 1 # the obscode is the MPC observatory code for the provided telescope. -ar_obs_code = X05 +#ar_obs_code = X05 # the order of healpix which we will use for the healpy portions of the code. # the nside is equivalent to 2**ar_healpix_order -ar_healpix_order = 6 +#ar_healpix_order = 6 [OUTPUT] diff --git a/tests/data/PPConfig_test_chunked.ini b/tests/data/PPConfig_test_chunked.ini index c0d96fc4..e6b05670 100644 --- a/tests/data/PPConfig_test_chunked.ini +++ b/tests/data/PPConfig_test_chunked.ini @@ -116,22 +116,22 @@ fading_function_on = False # Configs for running the ASSIST+REBOUND ephemerides generator. 
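Aside: these test configs comment the ar_* keys out rather than deleting the section, presumably because they drive runs with externally supplied ephemerides, where the new simulationConfigs dataclass treats any ar_* key as forbidden rather than mandatory. A minimal sketch of that behaviour under those assumptions (the _ephemerides_type keyword is the private hook the sorchaConfigs reader passes through, as shown earlier in this patch):

    from sorcha.utilities.sorchaConfigs import simulationConfigs

    # With external ephemerides, leaving every ar_* key unset is valid;
    # the section simply carries its None defaults.
    sim = simulationConfigs(_ephemerides_type="external")

    # Supplying any ar_* key alongside external ephemerides exits via
    # check_key_doesnt_exist with, e.g.:
    # "ERROR: ar_picket supplied in config file but ephemerides type is external"
    # simulationConfigs(_ephemerides_type="external", ar_picket=1)
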
# the field of view of our search field, in degrees -ar_ang_fov = 1.8 +#ar_ang_fov = 1.8 # the buffer zone around the field of view we want to include, in degrees -ar_fov_buffer = 0.2 +#ar_fov_buffer = 0.2 # the "picket" is our imprecise discretization of time that allows us to move progress # our simulations forward without getting too granular when we don't have to. # the unit is number of days. -ar_picket = 1 +#ar_picket = 1 # the obscode is the MPC observatory code for the provided telescope. -ar_obs_code = X05 +#ar_obs_code = X05 # the order of healpix which we will use for the healpy portions of the code. # the nside is equivalent to 2**ar_healpix_order -ar_healpix_order = 6 +#ar_healpix_order = 6 [OUTPUT] diff --git a/tests/data/PPConfig_test_unchunked.ini b/tests/data/PPConfig_test_unchunked.ini index ff1b873b..e6b03b12 100644 --- a/tests/data/PPConfig_test_unchunked.ini +++ b/tests/data/PPConfig_test_unchunked.ini @@ -116,22 +116,22 @@ fading_function_on = False # Configs for running the ASSIST+REBOUND ephemerides generator. # the field of view of our search field, in degrees -ar_ang_fov = 1.8 +#ar_ang_fov = 1.8 # the buffer zone around the field of view we want to include, in degrees -ar_fov_buffer = 0.2 +#ar_fov_buffer = 0.2 # the "picket" is our imprecise discretization of time that allows us to move progress # our simulations forward without getting too granular when we don't have to. # the unit is number of days. -ar_picket = 1 +#ar_picket = 1 # the obscode is the MPC observatory code for the provided telescope. -ar_obs_code = X05 +#ar_obs_code = X05 # the order of healpix which we will use for the healpy portions of the code. # the nside is equivalent to 2**ar_healpix_order -ar_healpix_order = 6 +#ar_healpix_order = 6 [OUTPUT] diff --git a/tests/ephemeris/test_ephemeris_generation.py b/tests/ephemeris/test_ephemeris_generation.py index 48916573..844cf6ac 100644 --- a/tests/ephemeris/test_ephemeris_generation.py +++ b/tests/ephemeris/test_ephemeris_generation.py @@ -10,7 +10,7 @@ from sorcha.ephemeris.simulation_driver import create_ephemeris, write_out_ephemeris_file from sorcha.modules.PPReadPointingDatabase import PPReadPointingDatabase from sorcha.ephemeris.simulation_setup import precompute_pointing_information - +from sorcha.utilities.sorchaConfigs import sorchaConfigs ,inputConfigs from sorcha.readers.CombinedDataReader import CombinedDataReader from sorcha.readers.EphemerisReader import EphemerisDataReader @@ -123,14 +123,14 @@ def test_ephemeris_end2end(single_synthetic_pointing, tmp_path): pplogger = PPGetLogger(cmd_args_dict["outpath"], "test_log") args = sorchaArguments(cmd_args_dict) - configs = PPConfigFileParser( + configs = sorchaConfigs( args.configfile, args.surveyname, ) - configs["seed"] = 24601 + #configs["seed"] = 24601 filterpointing = PPReadPointingDatabase( - args.pointing_database, configs["observing_filters"], configs["pointing_sql_query"], "rubin_sim" + args.pointing_database, configs.filters.observing_filters, configs.input.pointing_sql_query, "rubin_sim" ) filterpointing = precompute_pointing_information(filterpointing, args, configs) @@ -165,7 +165,15 @@ class args(object): cmd_args = args() - configs = {"eph_format": "csv"} + correct_inputs = { + "ephemerides_type": "ar", + "eph_format": "csv", + "size_serial_chunk": 5000, + "aux_format": "whitespace", + "pointing_sql_query": "SELECT observationId, observationStartMJD as observationStartMJD_TAI, visitTime, visitExposureTime, filter, seeingFwhmGeom as seeingFwhmGeom_arcsec, seeingFwhmEff as 
seeingFwhmEff_arcsec, fiveSigmaDepth as fieldFiveSigmaDepth_mag , fieldRA as fieldRA_deg, fieldDec as fieldDec_deg, rotSkyPos as fieldRotSkyPos_deg FROM observations order by observationId", +} + configs = inputConfigs(**correct_inputs) + setattr(configs,"input", configs) out_path = os.path.join(tmp_path, "test_ephem_out") @@ -195,7 +203,15 @@ class args(object): cmd_args = args() - configs = {"eph_format": "whitespace"} + correct_inputs = { + "ephemerides_type": "ar", + "eph_format": "whitespace", + "size_serial_chunk": 5000, + "aux_format": "whitespace", + "pointing_sql_query": "SELECT observationId, observationStartMJD as observationStartMJD_TAI, visitTime, visitExposureTime, filter, seeingFwhmGeom as seeingFwhmGeom_arcsec, seeingFwhmEff as seeingFwhmEff_arcsec, fiveSigmaDepth as fieldFiveSigmaDepth_mag , fieldRA as fieldRA_deg, fieldDec as fieldDec_deg, rotSkyPos as fieldRotSkyPos_deg FROM observations order by observationId", +} + configs = inputConfigs(**correct_inputs) + setattr(configs,"input", configs) out_path = os.path.join(tmp_path, "test_ephem_out_whitespace") @@ -225,7 +241,15 @@ class args(object): cmd_args = args() - configs = {"eph_format": "hdf5"} + correct_inputs = { + "ephemerides_type": "ar", + "eph_format": "hdf5", + "size_serial_chunk": 5000, + "aux_format": "whitespace", + "pointing_sql_query": "SELECT observationId, observationStartMJD as observationStartMJD_TAI, visitTime, visitExposureTime, filter, seeingFwhmGeom as seeingFwhmGeom_arcsec, seeingFwhmEff as seeingFwhmEff_arcsec, fiveSigmaDepth as fieldFiveSigmaDepth_mag , fieldRA as fieldRA_deg, fieldDec as fieldDec_deg, rotSkyPos as fieldRotSkyPos_deg FROM observations order by observationId", +} + configs = inputConfigs(**correct_inputs) + setattr(configs,"input", configs) out_path = os.path.join(tmp_path, "test_ephem_out_h5") diff --git a/tests/ephemeris/test_pixdict.py b/tests/ephemeris/test_pixdict.py index 7d22debb..ac8331bd 100644 --- a/tests/ephemeris/test_pixdict.py +++ b/tests/ephemeris/test_pixdict.py @@ -11,11 +11,12 @@ generate_simulations, furnish_spiceypy, ) + from sorcha.ephemeris.pixel_dict import PixelDict from sorcha.ephemeris.simulation_parsing import Observatory from sorcha.ephemeris.simulation_geometry import ecliptic_to_equatorial, vec2ra_dec from sorcha.ephemeris.simulation_constants import SPEED_OF_LIGHT, AU_KM - +from sorcha.utilities.sorchaConfigs import sorchaConfigs def test_pixeldict(tmp_path): # this test function will test out a bunch of different things inside @@ -66,14 +67,14 @@ def test_pixeldict(tmp_path): args = sorchaArguments(cmd_args_dict) - configs = PPConfigFileParser( + configs = sorchaConfigs( args.configfile, args.surveyname, ) - configs["seed"] = 24601 + #configs["seed"] = 24601 filterpointing = PPReadPointingDatabase( - args.pointing_database, configs["observing_filters"], configs["pointing_sql_query"], "rubin_sim" + args.pointing_database, configs.filters.observing_filters, configs.input.pointing_sql_query, "rubin_sim" ) filterpointing = precompute_pointing_information(filterpointing, args, configs) diff --git a/tests/sorcha/test_PPAddUncertainty.py b/tests/sorcha/test_PPAddUncertainty.py index 15b87ddd..97eefc80 100644 --- a/tests/sorcha/test_PPAddUncertainty.py +++ b/tests/sorcha/test_PPAddUncertainty.py @@ -22,7 +22,7 @@ import numpy as np import pandas as pd from numpy.testing import assert_almost_equal, assert_equal - +from sorcha.utilities.sorchaConfigs import sorchaConfigs, expertConfigs from sorcha.modules.PPModuleRNG import PerModuleRNG @@ -105,6 +105,8 @@ 
def test_addUncertainties(): ) configs = {"trailing_losses_on": True, "default_SNR_cut": False} + configs = expertConfigs(**configs) + setattr(configs, "expert",configs) rng = PerModuleRNG(2021) @@ -145,7 +147,8 @@ def test_uncertainties(): ) configs = {"trailing_losses_on": False} - + configs = expertConfigs(**configs) + setattr(configs, "expert",configs) ast_sig_deg, photo_sig, SNR = uncertainties(observations, configs) assert_almost_equal(ast_sig_deg[0], 0.000004, decimal=6) @@ -153,6 +156,8 @@ def test_uncertainties(): assert_almost_equal(SNR[0], 67.673075, decimal=6) configs_trail = {"trailing_losses_on": True} + configs_trail = expertConfigs(**configs_trail) + setattr(configs_trail, "expert",configs_trail) ast_sig_deg_T, photo_sig_T, SNR_T = uncertainties(observations, configs_trail) diff --git a/tests/sorcha/test_PPApplyFOVFilter.py b/tests/sorcha/test_PPApplyFOVFilter.py index a39cb246..6ccbf23a 100644 --- a/tests/sorcha/test_PPApplyFOVFilter.py +++ b/tests/sorcha/test_PPApplyFOVFilter.py @@ -4,7 +4,7 @@ from sorcha.modules.PPModuleRNG import PerModuleRNG from sorcha.utilities.dataUtilitiesForTests import get_test_filepath - +from sorcha.utilities.sorchaConfigs import sorchaConfigs, fovConfigs def test_PPSimpleSensorArea(): from sorcha.modules.PPApplyFOVFilter import PPSimpleSensorArea @@ -58,7 +58,8 @@ def test_PPApplyFOVFilters(): "fill_factor": None, "footprint_edge_threshold": None, } - + configs = fovConfigs(**configs) + setattr(configs, "fov", configs) new_obs = PPApplyFOVFilter(observations, configs, rng) expected = [897478, 897521, 901987, 902035, 907363, 907416, 907470] @@ -70,6 +71,8 @@ def test_PPApplyFOVFilters(): "circle_radius": None, "footprint_edge_threshold": None, } + configs = fovConfigs(**configs) + setattr(configs, "fov", configs) new_obs = PPApplyFOVFilter(observations, configs, rng) expected = [894816, 894838, 897478, 897521, 901987, 902035, 907363, 907416, 907470, 909426] @@ -79,9 +82,11 @@ def test_PPApplyFOVFilters(): configs = { "camera_model": "footprint", "footprint_path": get_test_filepath("detectors_corners.csv"), - "footprint_edge_threshold": None, + "footprint_edge_threshold": 2, } - footprint = Footprint(configs["footprint_path"]) + configs = fovConfigs(**configs) + setattr(configs, "fov", configs) + footprint = Footprint(configs.fov.footprint_path) new_obs = PPApplyFOVFilter(observations, configs, rng, footprint=footprint) expected_ids = [ @@ -96,9 +101,10 @@ def test_PPApplyFOVFilters(): 130.0, 130.0, ] - assert set(new_obs["detectorID"].values) == set(expected_ids) - + configs = {"camera_model": "none"} + configs = fovConfigs(**configs) + setattr(configs, "fov", configs) new_obs = PPApplyFOVFilter(observations, configs, rng) assert len(new_obs) == 10 diff --git a/tests/sorcha/test_PPOutput.py b/tests/sorcha/test_PPOutput.py index fcd1cfc0..e1d91c8c 100644 --- a/tests/sorcha/test_PPOutput.py +++ b/tests/sorcha/test_PPOutput.py @@ -5,10 +5,10 @@ import pytest from numpy.testing import assert_equal -from sorcha.utilities.dataUtilitiesForTests import get_test_filepath +from sorcha.utilities.dataUtilitiesForTests import get_test_filepath, get_demo_filepath from sorcha.utilities.sorchaArguments import sorchaArguments from sorcha.modules.PPOutput import PPWriteOutput - +from sorcha.utilities.sorchaConfigs import outputConfigs, linkingfilterConfigs, sorchaConfigs # some global variables used by tests observations = pd.read_csv(get_test_filepath("test_input_fullobs.csv"), nrows=1) @@ -53,15 +53,13 @@ def test_PPOutWriteHDF5(tmp_path): def 
test_PPWriteOutput_csv(tmp_path): args.outpath = tmp_path args.outfilestem = "PPOutput_test_out" + config_file_location = get_demo_filepath("sorcha_config_demo.ini") + configs = sorchaConfigs(config_file_location,"rubin_sim") + configs.output.magnitude_decimals = 3 + configs.output.position_decimals = 7 + configs.linkingfilter.ssp_linking_on = False + configs.linkingfilter.drop_unlinked = True - configs = { - "output_columns": "basic", - "position_decimals": 7, - "magnitude_decimals": 3, - "output_format": "csv", - "SSP_linking_on": False, - "drop_unlinked": True, - } expected = np.array( [ @@ -96,15 +94,15 @@ def test_PPWriteOutput_sql(tmp_path): args.outpath = tmp_path args.outfilestem = "PPOutput_test_out" + config_file_location = get_demo_filepath("sorcha_config_demo.ini") + configs = sorchaConfigs(config_file_location,"rubin_sim") + configs.output.magnitude_decimals = 3 + configs.output.position_decimals = 7 + configs.linkingfilter.ssp_linking_on = False + configs.linkingfilter.drop_unlinked = True + configs.output.output_format = "sqlite3" + - configs = { - "output_columns": "basic", - "position_decimals": 7, - "magnitude_decimals": 3, - "output_format": "sqlite3", - "SSP_linking_on": False, - "drop_unlinked": True, - } expected = np.array( [ @@ -148,15 +146,11 @@ def test_PPWriteOutput_sql(tmp_path): def test_PPWriteOutput_all(tmp_path): # additional test to ensure that "all" output option and no rounding works - - configs = { - "output_columns": "all", - "position_decimals": None, - "magnitude_decimals": None, - "output_format": "csv", - "SSP_linking_on": False, - "drop_unlinked": True, - } + config_file_location = get_demo_filepath("sorcha_config_demo.ini") + configs = sorchaConfigs(config_file_location,"rubin_sim") + configs.linkingfilter.ssp_linking_on = False + configs.linkingfilter.drop_unlinked = True + configs.output.output_columns = "all" args.outpath = tmp_path args.outfilestem = "PPOutput_test_all" @@ -233,14 +227,20 @@ def test_PPWriteOutput_all(tmp_path): def test_PPWriteOutput_custom(tmp_path): - configs = { - "output_columns": ["ObjID", "fieldMJD_TAI"], + + config_file_location = get_demo_filepath("sorcha_config_demo.ini") + configs = sorchaConfigs(config_file_location,"rubin_sim") + configs.output.magnitude_decimals = 3 + configs.output.position_decimals = 7 + configs.linkingfilter.ssp_linking_on = False + configs.linkingfilter.drop_unlinked = True + out_dict = { + "output_format": "csv", + "output_columns": "ObjID , fieldMJD_TAI", "position_decimals": 7, "magnitude_decimals": 3, - "output_format": "csv", - "SSP_linking_on": False, - "drop_unlinked": True, } + configs.output = outputConfigs(**out_dict) args.outpath = tmp_path args.outfilestem = "PPOutput_test_multi" @@ -255,7 +255,19 @@ def test_PPWriteOutput_custom(tmp_path): # and now we test the error message - configs["output_columns"] = ["ObjID", "fieldMJD_TAI", "dummy_column"] + configs = sorchaConfigs(config_file_location,"rubin_sim") + configs.output.magnitude_decimals = 3 + configs.output.position_decimals = 7 + configs.linkingfilter.ssp_linking_on = False + configs.linkingfilter.drop_unlinked = True + out_dict = { + "output_format": "csv", + "output_columns": "ObjID , fieldMJD_TAI, dummy_column", + "position_decimals": 7, + "magnitude_decimals": 3, + } + configs.output = outputConfigs(**out_dict) + with pytest.raises(SystemExit) as e: PPWriteOutput(args, configs, observations, 10) @@ -274,16 +286,14 @@ def test_PPWriteOutput_linking_col(tmp_path): observations_linktest["object_linked"] = [True] - configs = 
{ - "output_columns": "basic", - "position_decimals": 7, - "magnitude_decimals": 3, - "output_format": "csv", - "SSP_linking_on": True, - "drop_unlinked": False, - } + config_file_location = get_demo_filepath("sorcha_config_demo.ini") + configs = sorchaConfigs(config_file_location,"rubin_sim") + configs.output.magnitude_decimals = 3 + configs.output.position_decimals = 7 + configs.linkingfilter.ssp_linking_on = True + configs.linkingfilter.drop_unlinked = False + PPWriteOutput(args, configs, observations_linktest, 10) csv_test_in = pd.read_csv(os.path.join(tmp_path, "PPOutput_test_linking.csv")) - assert "object_linked" in csv_test_in.columns diff --git a/tests/sorcha/test_PPRandomizeMeasurements.py b/tests/sorcha/test_PPRandomizeMeasurements.py index 4576649b..777e6cc2 100644 --- a/tests/sorcha/test_PPRandomizeMeasurements.py +++ b/tests/sorcha/test_PPRandomizeMeasurements.py @@ -4,7 +4,7 @@ from sorcha.modules.PPModuleRNG import PerModuleRNG from sorcha.utilities.dataUtilitiesForTests import get_test_filepath - +from sorcha.utilities.sorchaConfigs import expertConfigs def test_randomizePhotometry(): from sorcha.modules.PPRandomizeMeasurements import randomizePhotometry @@ -112,7 +112,8 @@ def test_randomizeAstrometryAndPhotometry(): observations = pd.DataFrame(data_in, index=[0]) configs = {"default_SNR_cut": True, "trailing_losses_on": True} - + configs = expertConfigs(**configs) + setattr(configs,"expert",configs) obs_out = randomizeAstrometryAndPhotometry(observations, configs, PerModuleRNG(2021)) assert_almost_equal(obs_out["trailedSourceMag"][0], 19.663194, decimal=6) @@ -123,7 +124,7 @@ def test_randomizeAstrometryAndPhotometry(): assert obs_out["RATrue_deg"][0] == data_in["RA_deg"] assert obs_out["DecTrue_deg"][0] == data_in["Dec_deg"] - configs["trailing_losses_on"] = False + configs.expert.trailing_losses_on = False obs_out_noloss = randomizeAstrometryAndPhotometry(observations, configs, PerModuleRNG(2021)) assert obs_out_noloss["PSFMag"][0] == obs_out_noloss["trailedSourceMag"][0] diff --git a/tests/sorcha/test_PPStats.py b/tests/sorcha/test_PPStats.py index f60ab6e4..0a3e3d8f 100644 --- a/tests/sorcha/test_PPStats.py +++ b/tests/sorcha/test_PPStats.py @@ -3,7 +3,7 @@ import numpy as np from numpy.testing import assert_equal import pytest - +from sorcha.utilities.sorchaConfigs import linkingfilterConfigs from sorcha.modules.PPStats import stats @@ -28,8 +28,11 @@ def test_PPStats(tmp_path): } test_df = pd.DataFrame(test_dict) - configs = {"SSP_linking_on": True, "drop_unlinked": False} - + + configs = linkingfilterConfigs() + configs.ssp_linking_on = True + configs.drop_unlinked = False + setattr(configs,"linkingfilter",configs) filename_stats = "test_stats" stats(test_df, filename_stats, tmp_path, configs) @@ -94,6 +97,8 @@ def test_PPStats_nolinking(tmp_path): test_df = pd.DataFrame(test_dict) configs = {"SSP_linking_on": False, "drop_unlinked": True} + configs = linkingfilterConfigs() + setattr(configs,"linkingfilter",configs) filename_stats = "test_stats" stats(test_df, filename_stats, tmp_path, configs) @@ -145,7 +150,11 @@ def test_PPStats_justlinking(tmp_path): } test_df = pd.DataFrame(test_dict) - configs = {"SSP_linking_on": True, "drop_unlinked": True} + + configs = linkingfilterConfigs() + configs.ssp_linking_on = True + configs.drop_unlinked = True + setattr(configs,"linkingfilter",configs) filename_stats = "test_stats" stats(test_df, filename_stats, tmp_path, configs) diff --git a/tests/sorcha/test_sorchaConfigs.py b/tests/sorcha/test_sorchaConfigs.py new file mode 
100644 index 00000000..a6a2f254 --- /dev/null +++ b/tests/sorcha/test_sorchaConfigs.py @@ -0,0 +1,988 @@ +import pytest + +from sorcha.utilities.dataUtilitiesForTests import get_demo_filepath +from sorcha.lightcurves.lightcurve_registration import LC_METHODS +from sorcha.activity.activity_registration import CA_METHODS +from sorcha.utilities.sorchaConfigs import ( + sorchaConfigs, + inputConfigs, + simulationConfigs, + filtersConfigs, + saturationConfigs, + phasecurvesConfigs, + fovConfigs, + fadingfunctionConfigs, + linkingfilterConfigs, + outputConfigs, + lightcurveConfigs, + activityConfigs, + expertConfigs, +) + +# these are the results we expect from sorcha_config_demo.ini +correct_inputs = { + "ephemerides_type": "ar", + "eph_format": "csv", + "size_serial_chunk": 5000, + "aux_format": "whitespace", + "pointing_sql_query": "SELECT observationId, observationStartMJD as observationStartMJD_TAI, visitTime, visitExposureTime, filter, seeingFwhmGeom as seeingFwhmGeom_arcsec, seeingFwhmEff as seeingFwhmEff_arcsec, fiveSigmaDepth as fieldFiveSigmaDepth_mag , fieldRA as fieldRA_deg, fieldDec as fieldDec_deg, rotSkyPos as fieldRotSkyPos_deg FROM observations order by observationId", +} +correct_simulation = { + "_ephemerides_type": "ar", + "ar_ang_fov": 2.06, + "ar_fov_buffer": 0.2, + "ar_picket": 1, + "ar_obs_code": "X05", + "ar_healpix_order": 6, +} + +correct_filters_read = {"observing_filters": "r,g,i,z,u,y", "survey_name": "rubin_sim"} +correct_filters = { + "observing_filters": ["r", "g", "i", "z", "u", "y"], + "survey_name": "rubin_sim", + "mainfilter": None, + "othercolours": None, +} + +correct_saturation = { + "bright_limit_on": True, + "bright_limit": 16.0, + "_observing_filters": ["r", "g", "i", "z", "u", "y"], +} +correct_saturation_read = {"bright_limit": "16.0", "_observing_filters": ["r", "g", "i", "z", "u", "y"]} + +correct_phasecurve = {"phase_function": "HG"} + +correct_fadingfunction = { + "fading_function_on": True, + "fading_function_width": 0.1, + "fading_function_peak_efficiency": 1.0, +} + +correct_linkingfilter = { + "ssp_linking_on": True, + "drop_unlinked": True, + "ssp_detection_efficiency": 0.95, + "ssp_number_observations": 2, + "ssp_separation_threshold": 0.5, + "ssp_maximum_time": 0.0625, + "ssp_number_tracklets": 3, + "ssp_track_window": 15, + "ssp_night_start_utc": 16.0, +} + +correct_fov = { + "camera_model": "footprint", + "footprint_path": None, + "fill_factor": None, + "circle_radius": None, + "footprint_edge_threshold": 2.0, + "survey_name": "rubin_sim", +} + +correct_fov_read = {"camera_model": "footprint", "footprint_edge_threshold": 2.0, "survey_name": "rubin_sim"} + +correct_output = { + "output_format": "csv", + "output_columns": "basic", + "position_decimals": None, + "magnitude_decimals": None, +} + +correct_lc_model = {"lc_model": None} + +correct_activity = {"comet_activity": None} + +correct_expert = { + "SNR_limit": None, + "SNR_limit_on": False, + "mag_limit": None, + "mag_limit_on": False, + "trailing_losses_on": True, + "default_SNR_cut": True, + "randomization_on": True, + "vignetting_on": True, +} +################################################################################################################################## + +# SORCHA Configs test + + +def test_sorchaConfigs(): + """ + tests that sorchaConfigs reads in config file correctly + """ + # general test to make sure, overall, everything works. 
checks just one file: sorcha_config_demo.ini + + config_file_location = get_demo_filepath("sorcha_config_demo.ini") + test_configs = sorchaConfigs(config_file_location, "rubin_sim") + # check each section to make sure you get what you expect + assert correct_inputs == test_configs.input.__dict__ + assert correct_simulation == test_configs.simulation.__dict__ + print(correct_filters) + print(test_configs.filters.__dict__) + assert correct_filters == test_configs.filters.__dict__ + assert correct_saturation == test_configs.saturation.__dict__ + assert correct_phasecurve == test_configs.phasecurves.__dict__ + assert correct_fov == test_configs.fov.__dict__ + assert correct_fadingfunction == test_configs.fadingfunction.__dict__ + assert correct_linkingfilter == test_configs.linkingfilter.__dict__ + assert correct_output == test_configs.output.__dict__ + assert correct_lc_model == test_configs.lightcurve.__dict__ + assert correct_activity == test_configs.activity.__dict__ + assert correct_expert == test_configs.expert.__dict__ + + +################################################################################################################################## + +# Inputs section test + + +def test_inputConfigs_int(): + """ + tests that wrong inputs for inputConfigs int attributes is caught correctly + """ + + input_configs = correct_inputs.copy() + # make sure everything populated correctly + test_configs = inputConfigs(**input_configs) + assert test_configs.__dict__ == input_configs + + # now we check some wrong inputs to make sure they're caught correctly. size_serial_chunk has to be castable as an integer + input_configs["size_serial_chunk"] = "five thousand" + + # we're telling pytest that we expect this command to fail with a SystemExit + with pytest.raises(SystemExit) as error_text: + test_configs = inputConfigs(**input_configs) + + # and this checks that the error message is what we expect. + assert ( + error_text.value.code + == "ERROR: expected an int for config parameter size_serial_chunk. Check value in config file." + ) + + +@pytest.mark.parametrize( + "key_name", ["ephemerides_type", "eph_format", "size_serial_chunk", "aux_format", "pointing_sql_query"] +) +def test_inputConfigs_mandatory(key_name): + """ + this loops through the mandatory keys and makes sure the code fails correctly when each is missing + """ + + input_configs = correct_inputs.copy() + + del input_configs[key_name] + + with pytest.raises(SystemExit) as error_text: + test_configs = inputConfigs(**input_configs) + + assert ( + error_text.value.code + == f"ERROR: No value found for required key {key_name} in config file. Please check the file and try again." + ) + + +@pytest.mark.parametrize( + "key_name, expected_list", + [ + ("ephemerides_type", "['ar', 'external']"), + ("aux_format", "['comma', 'whitespace', 'csv']"), + ("eph_format", "['csv', 'whitespace', 'hdf5']"), + ], +) +def test_inputConfigs_inlist(key_name, expected_list): + """ + this loops through the keys that need to have one of several set values and makes sure the correct error message triggers when they're not + """ + + input_configs = correct_inputs.copy() + + input_configs[key_name] = "definitely_fake_bad_key" + + with pytest.raises(SystemExit) as error_text: + test_configs = inputConfigs(**input_configs) + + assert ( + error_text.value.code + == f"ERROR: value definitely_fake_bad_key for config parameter {key_name} not recognised. Expecting one of: {expected_list}." 
+ ) + + +################################################################################################################################## + +# simulation configs test + + +@pytest.mark.parametrize("key_name", ["ar_ang_fov", "ar_fov_buffer"]) +def test_simulationConfigs_float(key_name): + """ + Tests that wrong inputs for simulationConfigs float attributes is caught correctly + """ + + simulation_configs = correct_simulation.copy() + test_configs = simulationConfigs(**simulation_configs) + assert test_configs.__dict__ == simulation_configs + + simulation_configs[key_name] = "one" + + with pytest.raises(SystemExit) as error_text: + test_configs = simulationConfigs(**simulation_configs) + + assert ( + error_text.value.code + == f"ERROR: expected a float for config parameter {key_name}. Check value in config file." + ) + + +@pytest.mark.parametrize("key_name", ["ar_picket", "ar_healpix_order"]) +def test_simulationConfigs_int(key_name): + """ + Tests that wrong inputs for simulationConfigs int attributes is caught correctly + """ + + simulation_configs = correct_simulation.copy() + test_configs = simulationConfigs(**simulation_configs) + assert test_configs.__dict__ == simulation_configs + + simulation_configs[key_name] = "one" + + with pytest.raises(SystemExit) as error_text: + test_configs = simulationConfigs(**simulation_configs) + + assert ( + error_text.value.code + == f"ERROR: expected an int for config parameter {key_name}. Check value in config file." + ) + + +@pytest.mark.parametrize( + "key_name", ["ar_ang_fov", "ar_fov_buffer", "ar_picket", "ar_obs_code", "ar_healpix_order"] +) +def test_simulationConfigs_mandatory(key_name): + """ + This loops through the mandatory keys and makes sure the code fails correctly when each is missing + """ + + simulation_configs = correct_simulation.copy() + + del simulation_configs[key_name] + + with pytest.raises(SystemExit) as error_text: + test_configs = simulationConfigs(**simulation_configs) + + assert ( + error_text.value.code + == f"ERROR: No value found for required key {key_name} in config file. Please check the file and try again." 
+ ) + + +@pytest.mark.parametrize( + "key_name", ["ar_ang_fov", "ar_fov_buffer", "ar_picket", "ar_obs_code", "ar_healpix_order"] +) +def test_simulationConfigs_notrequired(key_name): + """ + This loops through the not required keys and makes sure the code fails correctly when they're truthy + """ + + simulation_configs = correct_simulation.copy() + + for name in simulation_configs: + if key_name != name and name != "_ephemerides_type": + simulation_configs[name] = None + simulation_configs["_ephemerides_type"] = "external" + + with pytest.raises(SystemExit) as error_text: + test_configs = simulationConfigs(**simulation_configs) + + assert ( + error_text.value.code == f"ERROR: {key_name} supplied in config file but ephemerides type is external" + ) + + +################################################################################################################################## + +# filters config test + + +def test_filtersConfigs_check_filters(): + """ + Makes sure that when filters are not recognised for survey that error message shows + """ + + filters_configs = correct_filters_read.copy() + + test_configs = filtersConfigs(**filters_configs) + assert test_configs.__dict__ == correct_filters + + filters_configs["observing_filters"] = "a,f,u,g" + with pytest.raises(SystemExit) as error_text: + test_configs = filtersConfigs(**filters_configs) + assert ( + error_text.value.code + == "ERROR: Filter(s) ['a' 'f'] given in config file are not recognised filters for rubin_sim survey." + ) + + +@pytest.mark.parametrize("key_name", ["observing_filters", "survey_name"]) +def test_filtersConfigs_mandatory(key_name): + """ + this loops through the mandatory keys and makes sure the code fails correctly when each is missing + """ + + filter_configs = correct_filters_read.copy() + del filter_configs[key_name] + + with pytest.raises(SystemExit) as error_text: + test_configs = filtersConfigs(**filter_configs) + + assert ( + error_text.value.code + == f"ERROR: No value found for required key {key_name} in config file. Please check the file and try again." + ) + + +################################################################################################################################## +# saturation configs test + + +def test_saturationConfigs(): + """ + Tests that error occurs when list of saturation limits is not the same length as list of observing filters. + Also tests that error occurs when brightness limits can't be parsed. + """ + + saturation_configs = correct_saturation_read.copy() + + # make sure everything populated correctly + test_configs = saturationConfigs(**saturation_configs) + assert test_configs.__dict__ == correct_saturation + + saturation_configs["bright_limit"] = "10,2" + + with pytest.raises(SystemExit) as error_text: + test_configs = saturationConfigs(**saturation_configs) + + assert ( + error_text.value.code + == "ERROR: list of saturation limits is not the same length as list of observing filters." + ) + saturation_configs["bright_limit"] = "10;2" + + with pytest.raises(SystemExit) as error_text: + test_configs = saturationConfigs(**saturation_configs) + + assert ( + error_text.value.code == "ERROR: could not parse brightness limits. Check formatting and try again." 
+ ) + + +@pytest.mark.parametrize("key_name", ["_observing_filters"]) +def test_saturationConfigs_mandatory(key_name): + """ + this loops through the mandatory keys and makes sure the code fails correctly when each is missing + """ + + saturation_configs = correct_saturation_read.copy() + + del saturation_configs[key_name] + + with pytest.raises(SystemExit) as error_text: + test_configs = saturationConfigs(**saturation_configs) + + assert ( + error_text.value.code + == f"ERROR: No value found for required key {key_name} in config file. Please check the file and try again." + ) + + +################################################################################################################################## + +# phasecurve configs tests + + +@pytest.mark.parametrize("key_name", ["phase_function"]) +def test_phasecurveConfigs_mandatory(key_name): + """ + this loops through the mandatory keys and makes sure the code fails correctly when each is missing + """ + + phasecurves_configs = correct_phasecurve.copy() + + del phasecurves_configs[key_name] + + with pytest.raises(SystemExit) as error_text: + test_configs = phasecurvesConfigs(**phasecurves_configs) + + assert ( + error_text.value.code + == f"ERROR: No value found for required key {key_name} in config file. Please check the file and try again." + ) + + +@pytest.mark.parametrize( + "key_name, expected_list", + [("phase_function", "['HG', 'HG1G2', 'HG12', 'linear', 'none']")], +) +def test_phasecurveConfigs_inlist(key_name, expected_list): + """ + this loops through the keys that need to have one of several set values and makes sure the correct error message triggers when they're not + """ + + phasecurve_configs = correct_phasecurve.copy() + + phasecurve_configs[key_name] = "definitely_fake_bad_key" + + with pytest.raises(SystemExit) as error_text: + test_configs = phasecurvesConfigs(**phasecurve_configs) + print(error_text.value.code) + print( + f"ERROR: value definitely_fake_bad_key for config parameter {key_name} not recognised. Expecting one of: {expected_list}." + ) + assert ( + error_text.value.code + == f"ERROR: value definitely_fake_bad_key for config parameter {key_name} not recognised. Expecting one of: {expected_list}." + ) + + +################################################################################################################################## + +# fov configs test + + +def test_fovConfigs_inlist(): + """ + this loops through the keys that need to have one of several set values and makes sure the correct error message triggers when they're not + """ + + fov_configs = correct_fov_read.copy() + + test_configs = fovConfigs(**fov_configs) + assert test_configs.__dict__ == correct_fov + + fov_configs["camera_model"] = "fake_model" + with pytest.raises(SystemExit) as error_text: + test_configs = fovConfigs(**fov_configs) + assert ( + error_text.value.code + == "ERROR: value fake_model for config parameter camera_model not recognised. Expecting one of: ['circle', 'footprint', 'none']." + ) + + +def test_fovConfigs_surveyname(): + """ + Tests that error occurs when survey is not one provided with a default detector. 
+
+
+def test_fovConfigs_surveyname():
+    """
+    Tests that an error occurs when the survey is not one for which a default detector
+    footprint is provided.
+    """
+
+    fov_configs = correct_fov_read.copy()
+
+    test_configs = fovConfigs(**fov_configs)
+    assert test_configs.__dict__ == correct_fov
+
+    fov_configs["survey_name"] = "fake"
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = fovConfigs(**fov_configs)
+    assert (
+        error_text.value.code
+        == "ERROR: a default detector footprint is currently only provided for LSST; please provide your own footprint file."
+    )
+
+
+@pytest.mark.parametrize("key_name", ["fill_factor", "circle_radius"])
+def test_fovConfigs_camera_footprint_notrequired(key_name):
+    """
+    Loops through the keys that are only valid for the "circle" camera model and makes sure
+    the code fails correctly when one of them is supplied alongside the footprint model.
+    """
+
+    fov_configs = correct_fov_read.copy()
+
+    # neither key should be accepted while the camera model is "footprint"
+    fov_configs[key_name] = 0.5
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = fovConfigs(**fov_configs)
+    reason = 'but camera model is not "circle".'
+    assert error_text.value.code == f"ERROR: {key_name} supplied in config file {reason}"
+
+
+def test_fovConfigs_circle_mandatory():
+    """
+    Makes sure the code fails when the camera model is "circle" but neither "fill_factor"
+    nor "circle_radius" is supplied.
+    """
+
+    fov_configs = correct_fov_read.copy()
+    fov_configs["camera_model"] = "circle"
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = fovConfigs(**fov_configs)
+    assert (
+        error_text.value.code
+        == 'ERROR: either "fill_factor" or "circle_radius" must be specified for circular footprint.'
+    )
+
+
+@pytest.mark.parametrize("key_name", ["fill_factor", "circle_radius"])
+def test_fovConfigs_bounds(key_name):
+    """
+    Tests that out-of-bounds values in fovConfigs produce the correct error messages.
+    """
+
+    fov_configs = correct_fov_read.copy()
+    fov_configs["camera_model"] = "circle"
+    fov_configs[key_name] = -0.1
+
+    if key_name == "fill_factor":
+        with pytest.raises(SystemExit) as error_text:
+            test_configs = fovConfigs(**fov_configs)
+        assert error_text.value.code == "ERROR: fill_factor out of bounds. Must be between 0 and 1."
+    elif key_name == "circle_radius":
+        with pytest.raises(SystemExit) as error_text:
+            test_configs = fovConfigs(**fov_configs)
+        assert error_text.value.code == "ERROR: circle_radius is negative."
+
+
+def test_fovConfigs_camera_circle_notrequired():
+    """
+    Makes sure the code fails correctly when footprint_edge_threshold is supplied but the
+    camera model is "circle" rather than "footprint".
+    """
+
+    fov_configs = correct_fov_read.copy()
+    fov_configs["camera_model"] = "circle"
+    fov_configs["fill_factor"] = 0.5
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = fovConfigs(**fov_configs)
+    assert (
+        error_text.value.code
+        == 'ERROR: footprint_edge_threshold supplied in config file but camera model is not "footprint".'
+    )
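+
+
+# Editor's sketch (hedged): for the circular footprint the bounds checks above imply a
+# valid configuration looks something like
+#
+#     fovConfigs(camera_model="circle", fill_factor=0.9)     # 0 < fill_factor <= 1
+#     fovConfigs(camera_model="circle", circle_radius=1.75)  # presumably degrees, > 0
+#
+# with at least one of fill_factor or circle_radius required.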
+
+
+##################################################################################################################################
+
+# fadingfunction config tests
+
+
+@pytest.mark.parametrize("key_name", ["fading_function_width", "fading_function_peak_efficiency"])
+def test_fadingfunctionConfig_on_float(key_name):
+    """
+    Tests that wrong inputs for the fadingfunctionConfigs float attributes are caught correctly.
+    """
+
+    fadingfunction_configs = correct_fadingfunction.copy()
+    test_configs = fadingfunctionConfigs(**fadingfunction_configs)
+    assert test_configs.__dict__ == fadingfunction_configs
+
+    fadingfunction_configs[key_name] = "ten"
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = fadingfunctionConfigs(**fadingfunction_configs)
+
+    assert (
+        error_text.value.code
+        == f"ERROR: expected a float for config parameter {key_name}. Check value in config file."
+    )
+
+
+def test_fadingfunctionConfig_bool():
+    """
+    Tests that wrong inputs for the fadingfunctionConfigs bool attributes are caught correctly.
+    """
+
+    fadingfunction_configs = correct_fadingfunction.copy()
+    test_configs = fadingfunctionConfigs(**fadingfunction_configs)
+    assert test_configs.__dict__ == fadingfunction_configs
+
+    fadingfunction_configs["fading_function_on"] = "ten"
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = fadingfunctionConfigs(**fadingfunction_configs)
+
+    assert (
+        error_text.value.code
+        == "ERROR: expected a bool for config parameter fading_function_on. Check value in config file."
+    )
+
+
+@pytest.mark.parametrize(
+    "key_name", ["fading_function_on", "fading_function_width", "fading_function_peak_efficiency"]
+)
+def test_fadingfunction_mandatory(key_name):
+    """
+    Loops through the mandatory keys and makes sure the code fails correctly when each is missing.
+    """
+
+    fadingfunction_configs = correct_fadingfunction.copy()
+
+    del fadingfunction_configs[key_name]
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = fadingfunctionConfigs(**fadingfunction_configs)
+
+    assert (
+        error_text.value.code
+        == f"ERROR: No value found for required key {key_name} in config file. Please check the file and try again."
+    )
+
+
+@pytest.mark.parametrize("key_name", ["fading_function_width", "fading_function_peak_efficiency"])
+def test_fadingfunction_notrequired(key_name):
+    """
+    Loops through the keys that should not be supplied when fading_function_on is False and
+    makes sure the code fails correctly when one of them is set.
+    """
+
+    # "fading_function_width" and "fading_function_peak_efficiency" must not be supplied
+    # when "fading_function_on" is False
+    fadingfunction_configs = correct_fadingfunction.copy()
+    fadingfunction_configs["fading_function_on"] = "False"
+    fadingfunction_configs["fading_function_width"] = None
+    fadingfunction_configs["fading_function_peak_efficiency"] = None
+    fadingfunction_configs[key_name] = 0.5
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = fadingfunctionConfigs(**fadingfunction_configs)
+    assert (
+        error_text.value.code == f"ERROR: {key_name} supplied in config file but fading_function_on is False."
+    )
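+
+
+# Editor's note (hedged): the bounds tested below are consistent with the standard
+# survey fading function of Veres & Chesley (2017),
+#
+#     epsilon(m) = peak_efficiency / (1 + exp((m - m_5sigma) / width))
+#
+# in which detection efficiency rolls off around the 5-sigma limiting magnitude;
+# a width of 0.5 or more, or a peak efficiency outside [0, 1], would be unphysical,
+# which is presumably why those bounds are enforced.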
+
+
+@pytest.mark.parametrize("key_name", ["fading_function_width", "fading_function_peak_efficiency"])
+def test_fadingfunction_outofbounds(key_name):
+    """
+    Tests that out-of-bounds values in fadingfunctionConfigs produce the correct error messages.
+    """
+
+    fadingfunction_configs = correct_fadingfunction.copy()
+    fadingfunction_configs[key_name] = 10
+
+    if key_name == "fading_function_width":
+        with pytest.raises(SystemExit) as error_text:
+            test_configs = fadingfunctionConfigs(**fadingfunction_configs)
+        assert (
+            error_text.value.code
+            == "ERROR: fading_function_width out of bounds. Must be greater than zero and less than 0.5."
+        )
+    elif key_name == "fading_function_peak_efficiency":
+        with pytest.raises(SystemExit) as error_text:
+            test_configs = fadingfunctionConfigs(**fadingfunction_configs)
+        assert (
+            error_text.value.code
+            == "ERROR: fading_function_peak_efficiency out of bounds. Must be between 0 and 1."
+        )
+
+
+##################################################################################################################################
+
+# linkingfilter tests
+
+
+@pytest.mark.parametrize(
+    "key_name",
+    ["ssp_detection_efficiency", "ssp_separation_threshold", "ssp_maximum_time", "ssp_night_start_utc"],
+)
+def test_linkingfilterConfigs_float(key_name):
+    """
+    Tests that wrong inputs for the linkingfilterConfigs float attributes are caught correctly.
+    """
+
+    linkingfilter_configs = correct_linkingfilter.copy()
+    test_configs = linkingfilterConfigs(**linkingfilter_configs)
+    assert test_configs.__dict__ == linkingfilter_configs
+
+    linkingfilter_configs[key_name] = "one"
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = linkingfilterConfigs(**linkingfilter_configs)
+
+    assert (
+        error_text.value.code
+        == f"ERROR: expected a float for config parameter {key_name}. Check value in config file."
+    )
+
+
+@pytest.mark.parametrize("key_name", ["ssp_number_observations", "ssp_number_tracklets", "ssp_track_window"])
+def test_linking_filter_int(key_name):
+    """
+    Tests that wrong inputs for the linkingfilterConfigs int attributes are caught correctly.
+    """
+
+    linkingfilter_configs = correct_linkingfilter.copy()
+    test_configs = linkingfilterConfigs(**linkingfilter_configs)
+    assert test_configs.__dict__ == linkingfilter_configs
+
+    linkingfilter_configs[key_name] = "one"
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = linkingfilterConfigs(**linkingfilter_configs)
+
+    assert (
+        error_text.value.code
+        == f"ERROR: expected an int for config parameter {key_name}. Check value in config file."
+    )
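+
+
+# Editor's sketch (hypothetical values): the SSP linking parameters exercised below
+# control how detections are linked into tracklets and tracks; judging from the
+# bounds tested here, a plausible configuration would be e.g.
+#
+#     linkingfilterConfigs(
+#         ssp_detection_efficiency=0.95,  # between 0 and 1
+#         ssp_number_observations=2,      # positive int
+#         ssp_separation_threshold=0.5,   # positive float
+#         ssp_maximum_time=0.0625,        # non-negative float
+#         ssp_number_tracklets=3,         # positive int
+#         ssp_track_window=15,            # non-negative int
+#         ssp_night_start_utc=16.0,       # a valid hour between 0 and 24
+#     )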
+
+
+@pytest.mark.parametrize(
+    "key_name",
+    [
+        "ssp_detection_efficiency",
+        "ssp_separation_threshold",
+        "ssp_maximum_time",
+        "ssp_night_start_utc",
+        "ssp_number_observations",
+        "ssp_number_tracklets",
+        "ssp_track_window",
+    ],
+)
+def test_linkingfilter_bounds(key_name):
+    """
+    Tests that out-of-bounds values in linkingfilterConfigs produce the correct error messages.
+    """
+
+    linkingfilter_configs = correct_linkingfilter.copy()
+    linkingfilter_configs[key_name] = -5
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = linkingfilterConfigs(**linkingfilter_configs)
+
+    if key_name == "ssp_maximum_time" or key_name == "ssp_track_window":
+        assert error_text.value.code == f"ERROR: {key_name} is negative."
+    elif key_name == "ssp_separation_threshold" or key_name == "ssp_number_observations":
+        assert error_text.value.code == f"ERROR: {key_name} is zero or negative."
+    elif key_name == "ssp_number_tracklets":
+        assert error_text.value.code == "ERROR: ssp_number_tracklets is zero or less."
+    elif key_name == "ssp_detection_efficiency":
+        assert (
+            error_text.value.code
+            == "ERROR: ssp_detection_efficiency out of bounds (should be between 0 and 1)."
+        )
+    elif key_name == "ssp_night_start_utc":
+        assert (
+            error_text.value.code == "ERROR: ssp_night_start_utc must be a valid time between 0 and 24 hours."
+        )
+
+
+def test_linkingfilter_only_some_sspvar():
+    """
+    Makes sure the error message shows when only some of the SSP linking variables are provided.
+    """
+    linkingfilter_configs = correct_linkingfilter.copy()
+
+    linkingfilter_configs["ssp_separation_threshold"] = None
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = linkingfilterConfigs(**linkingfilter_configs)
+
+    assert (
+        error_text.value.code
+        == "ERROR: only some ssp linking variables supplied. Supply all five required variables for ssp linking filter, or none to turn filter off."
+    )
+
+
+def test_linkingfilter_bool():
+    """
+    Tests that wrong inputs for the linkingfilterConfigs bool attributes are caught correctly.
+    """
+
+    linkingfilter_configs = correct_linkingfilter.copy()
+
+    linkingfilter_configs["drop_unlinked"] = "fake"
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = linkingfilterConfigs(**linkingfilter_configs)
+
+    assert (
+        error_text.value.code
+        == "ERROR: expected a bool for config parameter drop_unlinked. Check value in config file."
+    )
+
+
+##################################################################################################################################
+
+# output config tests
+
+
+@pytest.mark.parametrize("key_name", ["output_format", "output_columns"])
+def test_outputConfigs_mandatory(key_name):
+    """
+    Loops through the mandatory keys and makes sure the code fails correctly when each is missing.
+    """
+
+    output_configs = correct_output.copy()
+
+    del output_configs[key_name]
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = outputConfigs(**output_configs)
+
+    assert (
+        error_text.value.code
+        == f"ERROR: No value found for required key {key_name} in config file. Please check the file and try again."
+    )
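+
+
+# Editor's sketch (hypothetical values): a minimal valid output configuration,
+# judging from the keys tested in this section, might be
+#
+#     outputConfigs(output_format="csv", output_columns="basic",
+#                   position_decimals=7, magnitude_decimals=3)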
+
+
+@pytest.mark.parametrize(
+    "key_name, expected_list",
+    [
+        ("output_format", "['csv', 'sqlite3', 'hdf5']"),
+        ("output_columns", "['basic', 'all']"),
+    ],
+)
+def test_outputConfigs_inlist(key_name, expected_list):
+    """
+    Loops through the keys that must take one of several set values and makes sure the correct
+    error message triggers when they do not.
+    """
+
+    output_configs = correct_output.copy()
+
+    output_configs[key_name] = "definitely_fake_bad_key"
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = outputConfigs(**output_configs)
+    assert (
+        error_text.value.code
+        == f"ERROR: value definitely_fake_bad_key for config parameter {key_name} not recognised. Expecting one of: {expected_list}."
+    )
+
+
+@pytest.mark.parametrize("key_name", ["position_decimals", "magnitude_decimals"])
+def test_outputConfigs_decimal_check(key_name):
+    """
+    Checks that decimal-places values which are not floats, or which are negative, are caught
+    correctly.
+    """
+
+    # use a local config that includes the decimal keys
+    correct_output = {
+        "output_format": "csv",
+        "output_columns": "basic",
+        "position_decimals": 5,
+        "magnitude_decimals": 5,
+    }
+
+    output_configs = correct_output.copy()
+    output_configs[key_name] = "definitely_fake_bad_key"
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = outputConfigs(**output_configs)
+    assert (
+        error_text.value.code
+        == f"ERROR: expected a float for config parameter {key_name}. Check value in config file."
+    )
+
+    output_configs = correct_output.copy()
+    output_configs[key_name] = -5
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = outputConfigs(**output_configs)
+    assert error_text.value.code == "ERROR: decimal places config variables cannot be negative."
+
+
+##################################################################################################################################
+
+# lightcurve config test
+
+
+def test_lightcurve_config():
+    """
+    Makes sure that an error occurs if the light curve model provided is not registered.
+    """
+
+    lightcurve_configs = correct_lc_model.copy()
+
+    lightcurve_configs["lc_model"] = "what_model"
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = lightcurveConfigs(**lightcurve_configs)
+    assert (
+        error_text.value.code
+        == f"The requested light curve model, 'what_model', is not registered. Available lightcurve options are: {list(LC_METHODS.keys())}"
+    )
+
+
+##################################################################################################################################
+
+# activity config test
+
+
+def test_activity_config():
+    """
+    Makes sure that an error occurs if the comet activity model provided is not registered.
+    """
+
+    activity_configs = correct_activity.copy()
+
+    activity_configs["comet_activity"] = "nothing"
+
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = activityConfigs(**activity_configs)
+    assert (
+        error_text.value.code
+        == f"The requested comet activity model, 'nothing', is not registered. Available comet activity models are: {list(CA_METHODS.keys())}"
+    )
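+
+
+# Editor's note (hedged): LC_METHODS and CA_METHODS are the registries of available
+# light curve and comet activity models, so a model name only becomes a valid config
+# value once it has been registered there, e.g. (hypothetical model and class names)
+#
+#     LC_METHODS["my_sinusoidal"] = MySinusoidalLightCurve
+#
+# which is why the error messages above report list(LC_METHODS.keys()) and
+# list(CA_METHODS.keys()) as the available options.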
+
+
+##################################################################################################################################
+
+# expert config tests
+
+
+@pytest.mark.parametrize("key_name, error_name", [("SNR_limit", "SNR"), ("mag_limit", "magnitude")])
+def test_expert_config_bounds(key_name, error_name):
+    """
+    Tests that out-of-bounds values in expertConfigs produce the correct error messages.
+    """
+
+    expert_configs = correct_expert.copy()
+    expert_configs[key_name] = -5
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = expertConfigs(**expert_configs)
+
+    assert error_text.value.code == f"ERROR: {error_name} limit is negative."
+
+
+def test_expert_config_exclusive():
+    """
+    Makes sure that an error occurs when both the SNR limit and the magnitude limit are specified.
+    """
+
+    expert_configs = correct_expert.copy()
+    expert_configs["mag_limit"] = 5
+    expert_configs["SNR_limit"] = 5
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = expertConfigs(**expert_configs)
+
+    assert (
+        error_text.value.code
+        == "ERROR: SNR limit and magnitude limit are mutually exclusive. Please delete one or both from config file."
+    )
+
+
+@pytest.mark.parametrize(
+    "key_name", ["trailing_losses_on", "default_SNR_cut", "randomization_on", "vignetting_on"]
+)
+def test_expertConfig_bool(key_name):
+    """
+    Tests that wrong inputs for the expertConfigs bool attributes are caught correctly.
+    """
+
+    expert_configs = correct_expert.copy()
+    expert_configs[key_name] = "fake"
+    with pytest.raises(SystemExit) as error_text:
+        test_configs = expertConfigs(**expert_configs)
+
+    assert (
+        error_text.value.code
+        == f"ERROR: expected a bool for config parameter {key_name}. Check value in config file."
+    )
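+
+
+# Editor's sketch (hypothetical values): a valid expert configuration applies at most
+# one brightness cut, e.g.
+#
+#     expertConfigs(SNR_limit=2.0, trailing_losses_on=True, randomization_on=True)
+#
+# or a magnitude cut via mag_limit instead, but never both at once.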