Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remove all the print() statements and use some form of logging instead #196

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 12 additions & 1 deletion plot.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,14 @@
import logging

logger = logging.getLogger()
logger.setLevel("DEBUG")
file_handler = logging.FileHandler("./log.txt", mode="a", encoding="utf-8")
file_handler.setLevel("DEBUG")
file_handler.setFormatter(
logging.Formatter(fmt="%(lineno)s---%(asctime)s---%(message)s")
)
logger.addHandler(file_handler)

from copy import deepcopy

import matplotlib.dates as mdates
Expand Down Expand Up @@ -36,7 +47,7 @@ def backtest_stats(account_value, value_col_name="account_value"):
transactions=None,
turnover_denom="AGB",
)
print(perf_stats_all)
logging.info(perf_stats_all)
return perf_stats_all


Expand Down
13 changes: 12 additions & 1 deletion test.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,14 @@
import logging

logger = logging.getLogger()
logger.setLevel("DEBUG")
file_handler = logging.FileHandler("./log.txt", mode="a", encoding="utf-8")
file_handler.setLevel("DEBUG")
file_handler.setFormatter(
logging.Formatter(fmt="%(lineno)s---%(asctime)s---%(message)s")
)
logger.addHandler(file_handler)

from agents.elegantrl_models import DRLAgent as DRLAgent_erl
from agents.rllib_models import DRLAgent as DRLAgent_rllib
from agents.stablebaselines3_models import DRLAgent as DRLAgent_sb3
Expand Down Expand Up @@ -41,7 +52,7 @@ def test(
# elegantrl needs state dim, action dim and net dim
net_dimension = kwargs.get("net_dimension", 2**7)
cwd = kwargs.get("cwd", "./" + str(model_name))
print("price_array: ", len(price_array))
logging.info("price_array: %s", len(price_array))

if drl_lib == "elegantrl":
episode_total_assets = DRLAgent_erl.DRL_prediction(
Expand Down
3 changes: 1 addition & 2 deletions trade.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
from test import test

from meta.env_stock_trading.env_stock_papertrading import (
AlpacaPaperTrading,
)
from test import test


def trade(
Expand Down
15 changes: 13 additions & 2 deletions train.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,14 @@
import logging

logger = logging.getLogger()
logger.setLevel("DEBUG")
file_handler = logging.FileHandler("./log.txt", mode="a", encoding="utf-8")
file_handler.setLevel("DEBUG")
file_handler.setFormatter(
logging.Formatter(fmt="%(lineno)s---%(asctime)s---%(message)s")
)
logger.addHandler(file_handler)

from agents.elegantrl_models import DRLAgent as DRLAgent_erl
from agents.rllib_models import DRLAgent as DRLAgent_rllib
from agents.stablebaselines3_models import DRLAgent as DRLAgent_sb3
Expand Down Expand Up @@ -88,9 +99,9 @@ def train(
tb_log_name=model_name,
total_timesteps=total_timesteps,
)
print("Training finished!")
logging.info("Training finished!")
trained_model.save(cwd)
print("Trained model saved in " + str(cwd))
logging.info("Trained model saved in " + str(cwd))

else:
raise ValueError("DRL library input is NOT supported. Please check.")
37 changes: 24 additions & 13 deletions tutorials/1-Introduction/China_A_share_market_tushare.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,17 @@

display.set_matplotlib_formats("svg")

import logging

logger = logging.getLogger()
logger.setLevel("DEBUG")
file_handler = logging.FileHandler("./log.txt", mode="a", encoding="utf-8")
file_handler.setLevel("DEBUG")
file_handler.setFormatter(
logging.Formatter(fmt="%(lineno)s---%(asctime)s---%(message)s")
)
logger.addHandler(file_handler)

from meta import config
from meta.data_processor import DataProcessor
from main import check_and_make_directories
Expand Down Expand Up @@ -42,7 +53,7 @@

pd.options.display.max_columns = None

print("ALL Modules have been imported!")
logging.info("ALL Modules have been imported!")


# %% md
Expand Down Expand Up @@ -125,32 +136,32 @@
# add_technical_indicator
p.add_technical_indicator(config.INDICATORS)
p.clean_data()
print(f"p.dataframe: {p.dataframe}")
logging.info(f"p.dataframe: {p.dataframe}")

# %% md

### Split training dataset

train = p.data_split(p.dataframe, TRAIN_START_DATE, TRAIN_END_DATE)
print(f"len(train.tic.unique()): {len(train.tic.unique())}")
logging.info(f"len(train.tic.unique()): {len(train.tic.unique())}")

# %%

print(f"train.tic.unique(): {train.tic.unique()}")
logging.info(f"train.tic.unique(): {train.tic.unique()}")

# %%

print(f"train.head(): {train.head()}")
logging.info(f"train.head(): {train.head()}")

# %%

print(f"train.shape: {train.shape}")
logging.info(f"train.shape: {train.shape}")

# %%

stock_dimension = len(train.tic.unique())
state_space = stock_dimension * (len(config.INDICATORS) + 2) + 1
print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}")
logging.info(f"Stock Dimension: {stock_dimension}, State Space: {state_space}")

# %% md

Expand Down Expand Up @@ -182,7 +193,7 @@
# %%

env_train, _ = e_train_gym.get_sb_env()
print(f"print(type(env_train)): {print(type(env_train))}")
logging.info("type(env_train): %s", type(env_train))

# %%

Expand Down Expand Up @@ -251,7 +262,7 @@
# %%

df_actions.to_csv("action.csv", index=False)
print(f"df_actions: {df_actions}")
logging.info(f"df_actions: {df_actions}")

# %% md

Expand Down Expand Up @@ -298,8 +309,8 @@
transactions=None,
turnover_denom="AGB",
)
print("==============DRL Strategy Stats===========")
print(f"perf_stats_all: {perf_stats_all}")
logging.info("==============DRL Strategy Stats===========")
logging.info(f"perf_stats_all: {perf_stats_all}")

# %%

Expand All @@ -317,6 +328,6 @@
transactions=None,
turnover_denom="AGB",
)
print("==============Baseline Strategy Stats===========")
logging.info("==============Baseline Strategy Stats===========")

print(f"perf_stats_all: {perf_stats_all}")
logging.info(f"perf_stats_all: {perf_stats_all}")
47 changes: 30 additions & 17 deletions tutorials/2-Advance/Crypto_Feature_Importance.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,17 @@

warnings.filterwarnings("ignore")

import logging

logger = logging.getLogger()
logger.setLevel("DEBUG")
file_handler = logging.FileHandler("./log.txt", mode="a", encoding="utf-8")
file_handler.setLevel("DEBUG")
file_handler.setFormatter(
logging.Formatter(fmt="%(lineno)s---%(asctime)s---%(message)s")
)
logger.addHandler(file_handler)

import pandas as pd
import numpy as np
from IPython import display
Expand Down Expand Up @@ -56,7 +67,7 @@

pd.options.display.max_columns = None

print("ALL Modules have been imported!")
logging.info("ALL Modules have been imported!")

# %% md

Expand Down Expand Up @@ -123,7 +134,7 @@
p.download_data(ticker_list=ticker_list)
p.clean_data()
df = p.dataframe
print(f"p.dataframe: {p.dataframe}")
logging.info(f"p.dataframe: {p.dataframe}")


def add_technical_indicator(df, tech_indicator_list):
Expand All @@ -146,12 +157,12 @@ def add_technical_indicator(df, tech_indicator_list):


processed_df = add_technical_indicator(df, technical_indicator_list)
print(f"processed_df: {processed_df.head()}")
logging.info(f"processed_df: {processed_df.head()}")

# Drop unnecessary columns and make time the index
processed_df.index = pd.to_datetime(processed_df.time)
processed_df.drop("time", inplace=True, axis=1)
print(processed_df.tail(20))
logging.info(processed_df.tail(20))


# IMPORTANT: Make sure that pd.Timedelta() is according to the time_interval to get the volatility for that time interval
Expand All @@ -174,7 +185,7 @@ def get_vol(prices, span=100):


data_ohlcv = processed_df.assign(volatility=get_vol(processed_df.close)).dropna()
print("data_ohlcv: ", data_ohlcv.head())
logging.info("data_ohlcv: %s", data_ohlcv.head())

##Adding Path Dependency: Triple-Barrier Method
###The labeling schema is defined as follows:
Expand Down Expand Up @@ -247,7 +258,7 @@ def get_barriers(


barriers = get_barriers()
print("barriers: ", barriers.head())
logging.info("barriers: %s", barriers.head())

####Function to get label for the dataset (0, 1, 2)
# 0: hit the stoploss
Expand Down Expand Up @@ -306,7 +317,7 @@ def get_labels():


get_labels()
print("barriers after labeling: ", barriers.head())
logging.info("barriers after labeling: %s", barriers.head())
# Merge the barriers with the main dataset and drop the last t_final + 1 barriers (as they are too close to the end)

data_ohlcv = data_ohlcv.merge(
Expand All @@ -315,9 +326,9 @@ def get_labels():
right_on="time",
)
data_ohlcv.drop(data_ohlcv.tail(t_final + 1).index, inplace=True)
print("data_ohlcv after labeling: ", data_ohlcv.head())
logging.info("data_ohlcv after labeling: %s", data_ohlcv.head())
# Count barrier hits ( 0 = stoploss, 1 = timeout, 2 = profit take)
print(pd.Series(data_ohlcv["label_barrier"]).value_counts())
logging.info(pd.Series(data_ohlcv["label_barrier"]).value_counts())

###Copying the Neural Network function from AI4Finance's ActorPPO agent
# https://github.com/AI4Finance-Foundation/ElegantRL/blob/master/elegantrl/agents/net.py
Expand Down Expand Up @@ -365,7 +376,7 @@ def forward(self, x):


model_NN1 = Net()
print(model_NN1)
logging.info(model_NN1)


class ClassifierDataset(Dataset):
Expand Down Expand Up @@ -435,7 +446,7 @@ def __len__(self):
test_loader = DataLoader(dataset=test_dataset, batch_size=1)
# Check GPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
logging.info(device)

# Set optimizer
criterion = nn.CrossEntropyLoss()
Expand All @@ -457,7 +468,7 @@ def train(fold, model, device, trainloader, optimizer, epoch):
optimizer.step()

if batch_idx % 100 == 0:
print(
logging.info(
"Train Fold/Epoch: {}/{} [{}/{} ({:.0f}%)]\ttrain_loss: {:.6f}".format(
fold,
epoch,
Expand Down Expand Up @@ -500,7 +511,7 @@ def test(fold, model, device, test_loader, correct_train, train_loss):

# Print train accuracy for epoch
# TODO: still a bug in summed up batch train loss
print(
logging.info(
"\nTrain set for fold {}: Average train_loss: {:.4f}, Accuracy: {}/{} ({:.5f}%)".format(
fold,
train_loss,
Expand All @@ -511,7 +522,7 @@ def test(fold, model, device, test_loader, correct_train, train_loss):
)

# Print test result for epoch
print(
logging.info(
"Test set for fold {}: Average test_loss: {:.4f}, Accuracy: {}/{} ({:.5f}%)\n".format(
fold,
test_loss,
Expand Down Expand Up @@ -559,11 +570,13 @@ def test(fold, model, device, test_loader, correct_train, train_loss):
y_pred_nn1 = y_pred_nn1.cpu().detach().numpy()

# log prediction values
print("labels in prediction:", np.unique(y_pred_nn1), "\n")
logging.info("labels in prediction: %s", np.unique(y_pred_nn1))

# print report
label_names = ["long", "no bet", "short"]
print(classification_report(y_test.astype(int), y_pred_nn1, target_names=label_names))
logging.info(
classification_report(y_test.astype(int), y_pred_nn1, target_names=label_names)
)


def perturbation_rank(model, x, y, names):
Expand Down Expand Up @@ -608,4 +621,4 @@ def perturbation_rank(model, x, y, names):
names,
)

print(display(rank))
logging.info(rank)
Loading