diff --git a/examples/scripts/binary_classifier.py b/examples/scripts/binary_classifier.py
index b6f542cc..2c3b8e69 100644
--- a/examples/scripts/binary_classifier.py
+++ b/examples/scripts/binary_classifier.py
@@ -20,11 +20,11 @@
 
 automl = AutoML(
     #results_path="AutoML_30",
-    #algorithms=["Neural Network"],
-    total_time_limit=200,
+    algorithms=["Random Forest"],
+    total_time_limit=20,
     explain_level=0,
     # validation={"validation_type": "split"},
-    mode="Compete",
+    mode="Explain",
     # validation={"validation_type": "split"}
     validation={
         "validation_type": "kfold",
diff --git a/examples/scripts/multi_class_classifier.py b/examples/scripts/multi_class_classifier.py
index 56026e06..27f7e235 100644
--- a/examples/scripts/multi_class_classifier.py
+++ b/examples/scripts/multi_class_classifier.py
@@ -19,7 +19,7 @@
 
 automl = AutoML(
     # results_path="AutoML_41",
-    algorithms=["Xgboost"],
+    algorithms=["Random Forest"],
     # algorithms=["Neural Network"],
     # "Linear",
     # "Xgboost",
diff --git a/setup.py b/setup.py
index cbbb064a..768e98e2 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
 
 setup(
     name='mljar-supervised',
-    version='0.6.0',
+    version='0.6.1',
     description='Automates Machine Learning Pipeline with Feature Engineering and Hyper-Parameters Tuning',
     long_description=long_description,
     long_description_content_type="text/markdown",
diff --git a/supervised/__init__.py b/supervised/__init__.py
index 97fb8ad1..6638e099 100644
--- a/supervised/__init__.py
+++ b/supervised/__init__.py
@@ -1,3 +1,3 @@
-__version__ = "0.6.0"
+__version__ = "0.6.1"
 
 from supervised.automl import AutoML
diff --git a/supervised/automl.py b/supervised/automl.py
index 50f60d1a..e4317f72 100644
--- a/supervised/automl.py
+++ b/supervised/automl.py
@@ -943,7 +943,9 @@ def fit(self, X_train, y_train, X_validation=None, y_validation=None):
             self._algorithms,
         )
 
-        self._time_ctrl.log_time("prepare_data", "prep_0", "prepare_data", time.time()-self._start_time)
+        self._time_ctrl.log_time(
+            "prepare_data", "prepare_data", "prepare_data", time.time() - self._start_time
+        )
 
         for step in steps:
             self._fit_level = step
diff --git a/supervised/tuner/time_controller.py b/supervised/tuner/time_controller.py
index 97864b49..36c659e0 100644
--- a/supervised/tuner/time_controller.py
+++ b/supervised/tuner/time_controller.py
@@ -97,12 +97,12 @@ def enough_time_for_step(self, fit_level):
 
         time_should_use = self.time_should_use(fit_level)
 
-        #print("not_so_random should use", time_should_use)
-        #print(total_time_spend)
-        #print(
+        # print("not_so_random should use", time_should_use)
+        # print(total_time_spend)
+        # print(
         #     self.step_spend("simple_algorithms")
         #     + self.step_spend("default_algorithms")
-        #)
+        # )
 
         if total_time_spend > time_should_use + self.step_spend(
             "simple_algorithms"
@@ -164,7 +164,7 @@ def enough_time(self, model_type, step):
         time_left = self._total_time_limit - total_time_spend
         # no time left, do not train any more models, sorry ...
         if time_left < 0:
-            #print("No time left", time_left)
+            # print("No time left", time_left)
             return False
 
         # there is still time and model_type was not tested yet
@@ -175,7 +175,7 @@ def enough_time(self, model_type, step):
         # check the fit level type
         # we dont want to spend too much time on one step
         if not self.enough_time_for_step(step):
-            #print("Not enough time for step", step)
+            # print("Not enough time for step", step)
             return False
 
         # check if there is enough time for model to train
@@ -233,8 +233,8 @@ def log_time(self, model_name, model_type, fit_level, train_time):
                 "train_time": train_time,
             }
         ]
-        #print(pd.DataFrame(self._spend))
-        #print("Already spend", self.already_spend())
+        # print(pd.DataFrame(self._spend))
+        # print("Already spend", self.already_spend())
 
     def step_spend(self, step):
         return np.sum([s["train_time"] for s in self._spend if s["fit_level"] == step])
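
A note on the automl.py hunk above: AutoML.fit used to log the data-preparation step as log_time("prepare_data", "prep_0", "prepare_data", ...), mixing two labels for the same step; the fix passes "prepare_data" for model_name, model_type, and fit_level alike, so the spend records carry one consistent key. Below is a minimal, self-contained sketch of the bookkeeping these hunks touch. It mirrors the log_time signature and the step_spend aggregation shown in the diff; the class name TimeControllerSketch, the record fields other than "train_time", and the sample model entries are illustrative assumptions, not the library's exact internals.

import numpy as np


class TimeControllerSketch:
    """Illustrative subset of supervised/tuner/time_controller.py.

    Only the spend log touched by this diff is modeled; the budget
    checks (enough_time, enough_time_for_step) are omitted.
    """

    def __init__(self):
        self._spend = []  # one record per logged step/model

    def log_time(self, model_name, model_type, fit_level, train_time):
        # Signature as in the diff; record fields other than
        # "train_time" are assumptions for illustration.
        self._spend += [
            {
                "model_name": model_name,
                "model_type": model_type,
                "fit_level": fit_level,
                "train_time": train_time,
            }
        ]

    def step_spend(self, step):
        # As in the diff: total seconds logged for one fit level.
        return np.sum([s["train_time"] for s in self._spend if s["fit_level"] == step])


tc = TimeControllerSketch()
# After the fix, the data-preparation step uses one label throughout:
tc.log_time("prepare_data", "prepare_data", "prepare_data", 1.5)
# Hypothetical model entry, just to show per-step aggregation:
tc.log_time("1_DecisionTree", "Decision Tree", "simple_algorithms", 4.2)
print(tc.step_spend("prepare_data"))       # 1.5
print(tc.step_spend("simple_algorithms"))  # 4.2

Since step_spend filters on fit_level, which was already "prepare_data" before the change, the relabeling appears to be about keeping model_type consistent in the spend records (e.g. for reporting) rather than altering the per-step totals.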