Commit 250e8e9: Merge mai
phschiele committed Dec 16, 2023
2 parents 07edcb3 + c5a986c
Showing 9 changed files with 829 additions and 97 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/basic.yml
@@ -21,4 +21,4 @@ jobs:
       - name: experiments
         shell: bash
         run: |
-          make experiment
+          make experiments
4 changes: 2 additions & 2 deletions Makefile
@@ -18,8 +18,8 @@ fmt: install ## Run autoformatting and linting
clean: ## Clean up caches and build artifacts
        @git clean -X -d -f

-.PHONY: experiment
-experiment: install ## Run all experiment
+.PHONY: experiments
+experiments: install ## Run all experiment
        ${VENV}/bin/python experiments.py

.PHONY: help
5 changes: 5 additions & 0 deletions experiments.py
@@ -1,7 +1,12 @@
from pathlib import Path

from experiments.taming import main as taming_main
+from experiments.scaling_small import main as scaling_small_main
+from experiments.scaling_large import main as scaling_large_main

if __name__ == "__main__":
    Path("checkpoints").mkdir(exist_ok=True)
+    Path("figures").mkdir(exist_ok=True)
+    scaling_small_main()
+    scaling_large_main()
    taming_main()
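Note: the runner expects each module under experiments/ to expose a main() entry point. A hypothetical sketch of what that layout implies (only the module and function names come from the imports above; the body is assumed):

    # experiments/scaling_small.py (hypothetical sketch)
    def main() -> None:
        # Run the small-scale scaling experiment, writing artifacts into the
        # checkpoints/ and figures/ directories created by experiments.py.
        ...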
52 changes: 37 additions & 15 deletions experiments/backtest.py
@@ -2,6 +2,7 @@

from dataclasses import dataclass
from functools import lru_cache
+import os
from pathlib import Path
import pickle
import time
@@ -20,9 +21,12 @@ def data_folder():
def load_data() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    prices = pd.read_csv(data_folder() / "prices.csv", index_col=0, parse_dates=True)
    spread = pd.read_csv(data_folder() / "spreads.csv", index_col=0, parse_dates=True)
-    volume = pd.read_csv(data_folder() / "volumes.csv", index_col=0, parse_dates=True)
    rf = pd.read_csv(data_folder() / "rf.csv", index_col=0, parse_dates=True).iloc[:, 0]
-    return prices, spread, volume, rf
+    if os.getenv("CI"):
+        prices = prices.tail(2000)
+        spread = spread.tail(2000)
+        rf = rf.tail(2000)
+    return prices, spread, rf
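Note: GitHub Actions exports CI=true on hosted runners, so the CI workflow backtests only the last 2000 rows while local runs keep the full history. The same gating pattern in isolation, as a minimal sketch:

    import os

    import pandas as pd


    def maybe_truncate(df: pd.DataFrame, n: int = 2000) -> pd.DataFrame:
        # CI is set on GitHub-hosted runners; locally it is normally unset,
        # so local runs see the full data set.
        return df.tail(n) if os.getenv("CI") else df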


@dataclass
@@ -33,9 +37,9 @@ class OptimizationInput:

    prices: pd.DataFrame
    mean: pd.Series
-    covariance: pd.DataFrame
+    chol: np.array
+    volas: np.array
    spread: pd.DataFrame
-    volume: pd.DataFrame
    quantities: np.ndarray
    cash: float
    risk_target: float

def run_backtest(
    strategy: Callable, risk_target: float, verbose: bool = False
-) -> tuple[pd.Series, pd.DataFrame]:
+) -> BacktestResult:
    """
    Run a simplified backtest for a given strategy.
    At time t we use data from t-lookback to t to compute the optimal portfolio
    weights and then execute the trades at time t.
    """

-    prices, spread, volume, rf = load_data()
+    prices, spread, rf = load_data()
+    training_length = 1250
+    prices, spread, rf = (
+        prices.iloc[training_length:],
+        spread.iloc[training_length:],
+        rf.iloc[training_length:],
+    )

    n_assets = prices.shape[1]

    lookback = 500
@@ -77,12 +88,15 @@ def run_backtest(
        .dropna()
    )  # At time t includes data up to t+1
    covariance_df = returns.ewm(halflife=125).cov()  # At time t includes data up to t
-    days = returns.index
+    indices = range(lookback, len(prices) - forward_smoothing)
+    days = [prices.index[t] for t in indices]
    covariances = {}
+    cholesky_factorizations = {}
    for day in days:
        covariances[day] = covariance_df.loc[day]
+        cholesky_factorizations[day] = np.linalg.cholesky(covariances[day].values)

-    for t in range(lookback, len(prices) - forward_smoothing):
+    for t in indices:
        start_time = time.perf_counter()

        day = prices.index[t]
@@ -92,17 +106,18 @@ def run_backtest(

        prices_t = prices.iloc[t - lookback : t + 1]  # Up to t
        spread_t = spread.iloc[t - lookback : t + 1]
-        volume_t = volume.iloc[t - lookback : t + 1]

        mean_t = means.loc[day]  # Forecast for return t to t+1
        covariance_t = covariances[day]  # Forecast for covariance t to t+1
+        chol_t = cholesky_factorizations[day]
+        volas_t = np.sqrt(np.diag(covariance_t.values))

        inputs_t = OptimizationInput(
            prices_t,
            mean_t,
-            covariance_t,
+            chol_t,
+            volas_t,
            spread_t,
-            volume_t,
            quantities,
            cash,
            risk_target,
@@ -181,7 +196,10 @@ def interest_and_fees(
    cash_interest = cash * (1 + rf) ** days_t_to_t_minus_1 - cash
    short_valuations = np.clip(quantities, None, 0) * prices
    short_value = short_valuations.sum()
-    shorting_fee = short_value * (1 + rf) ** days_t_to_t_minus_1 - short_value
+    short_spread = 0.05 / 360
+    shorting_fee = (
+        short_value * (1 + rf + short_spread) ** days_t_to_t_minus_1 - short_value
+    )
    return cash_interest + shorting_fee
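Note: shorts now accrue the risk-free rate plus a 5% annual borrow spread under a 360-day convention; previously they were charged the risk-free rate alone. Since short_value is nonpositive, the fee comes out negative, i.e. a cash outflow. A worked example with illustrative numbers (not from the repo):

    rf = 0.02 / 360             # assumed 2% annual risk-free rate, per day
    short_spread = 0.05 / 360   # 5% annual borrow spread, per day
    short_value = -100.0        # a $100 short position, stored as a negative valuation
    days = 1                    # one calendar day between rebalances

    shorting_fee = short_value * (1 + rf + short_spread) ** days - short_value
    # ~= -100 * 0.000194 ~= -0.0194: roughly 1.9 cents of borrow cost per day,
    # which interest_and_fees() adds to the cash account as a negative flow.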


@@ -245,12 +263,16 @@ def asset_weights(self):
        return self.valuations.div(self.portfolio_value, axis=0)

    @property
-    def turnover(self) -> float:
+    def daily_turnover(self) -> pd.Series:
        trades = self.quantities.diff()
        prices = load_data()[0].loc[self.history]
        valuation_trades = trades * prices
        relative_trades = valuation_trades.div(self.portfolio_value, axis=0)
-        return relative_trades.abs().sum(axis=1).mean() * self.periods_per_year
+        return relative_trades.abs().sum(axis=1)
+
+    @property
+    def turnover(self) -> float:
+        return self.daily_turnover.mean() * self.periods_per_year

    @property
    def mean_return(self) -> float:
@@ -270,7 +292,7 @@ def max_leverage(self) -> float:

    @property
    def sharpe(self) -> float:
-        risk_free = load_data()[3].loc[self.history]
+        risk_free = load_data()[2].loc[self.history]
        excess_return = self.portfolio_returns - risk_free
        return (
            excess_return.mean() / excess_return.std() * np.sqrt(self.periods_per_year)
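Note: precomputing one Cholesky factor per rebalance date moves the cubic-cost factorization out of the strategies' solve loop, and the sharpe fix ([3] to [2]) tracks load_data() now returning three objects instead of four. With Sigma = L L^T, portfolio volatility sqrt(w' Sigma w) equals ||L^T w||_2, so a strategy can consume OptimizationInput.chol directly; a minimal sketch of that use (the strategy code lives in files not shown here, so treat the usage as an assumption):

    import cvxpy as cp
    import numpy as np


    def risk_from_chol(chol: np.ndarray, w: cp.Variable) -> cp.Expression:
        # With Sigma = L @ L.T, sqrt(w' Sigma w) == ||L.T @ w||_2, so the
        # precomputed factor enters the problem as an SOCP-representable norm,
        # with no per-solve factorization of the covariance matrix.
        return cp.norm2(chol.T @ w)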
76 changes: 35 additions & 41 deletions experiments/markowitz.py
@@ -20,7 +20,6 @@
@dataclass
class Data:
    w_prev: np.ndarray  # (n_assets,) array of previous asset weights
-    c_prev: float  # previous cash weight
    idio_mean: np.ndarray  # (n_assets,) array of idiosyncratic mean returns
    factor_mean: np.ndarray  # (n_factors,) array of factor mean returns
    risk_free: float  # risk-free rate
@@ -36,17 +35,23 @@ class Data:
    def n_assets(self) -> int:
        return self.w_prev.size

+    @property
+    def volas(self) -> np.ndarray:
+        return self.idio_volas + np.linalg.norm(
+            self.F @ self.factor_covariance_chol, axis=1
+        )
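Note: the new Data.volas combines idiosyncratic and factor volatility per asset. With factor loadings f_i (row i of F) and factor covariance Sigma_f = C C^T, it returns idio_vola_i + ||f_i^T C||_2, which by the triangle inequality is at least the true marginal volatility sqrt(||f_i^T C||_2^2 + idio_vola_i^2); the sum is therefore a conservative per-asset scale, which suits its role in the robust risk term below. A quick numeric check with made-up values:

    import numpy as np

    F = np.array([[1.0, 0.5], [0.2, 0.8]])                   # factor loadings (made up)
    chol = np.linalg.cholesky([[0.04, 0.01], [0.01, 0.09]])  # factor covariance Cholesky
    idio_volas = np.array([0.10, 0.15])

    factor_volas = np.linalg.norm(F @ chol, axis=1)  # per-asset factor volatility
    volas = idio_volas + factor_volas                # what Data.volas computes
    true_volas = np.sqrt(factor_volas**2 + idio_volas**2)
    assert np.all(volas >= true_volas)               # conservative by construction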


@dataclass
class Parameters:
-    w_lower: np.ndarray  # (n_assets,) array of lower bounds on asset weights
-    w_upper: np.ndarray  # (n_assets,) array of upper bounds on asset weights
-    c_lower: float  # lower bound on cash weight
-    c_upper: float  # upper bound on cash weight
-    z_lower: np.ndarray  # (n_assets,) array of lower bounds on trades
-    z_upper: np.ndarray  # (n_assets,) array of upper bounds on trades
-    T_max: float  # turnover target
-    L_max: float  # leverage limit
+    w_min: np.ndarray  # (n_assets,) array of lower bounds on asset weights
+    w_max: np.ndarray  # (n_assets,) array of upper bounds on asset weights
+    c_min: float  # lower bound on cash weight
+    c_max: float  # upper bound on cash weight
+    z_min: np.ndarray  # (n_assets,) array of lower bounds on trades
+    z_max: np.ndarray  # (n_assets,) array of upper bounds on trades
+    T_tar: float  # turnover target
+    L_tar: float  # leverage target
    rho_mean: np.ndarray  # (n_assets,) array of mean returns for rho
    rho_covariance: float  # uncertainty in covariance matrix
    gamma_hold: float  # holding cost
@@ -75,17 +80,11 @@ def markowitz(data: Data, param: Parameters) -> tuple[np.ndarray, float, cp.Problem]:
    return_uncertainty = param.rho_mean @ cp.abs(w)
    return_wc = mean_return - return_uncertainty

-    # asset volatilities
-    factor_volas = cp.norm2(data.F @ data.factor_covariance_chol, axis=1)
-    volas = factor_volas + data.idio_volas
-
-    # portfolio risk
+    # worst-case (robust) risk
    factor_risk = cp.norm2((data.F @ data.factor_covariance_chol).T @ w)
    idio_risk = cp.norm2(cp.multiply(data.idio_volas, w))
    risk = cp.norm2(cp.hstack([factor_risk, idio_risk]))
-
-    # worst-case (robust) risk
-    risk_uncertainty = param.rho_covariance**0.5 * volas @ cp.abs(w)
+    risk_uncertainty = param.rho_covariance**0.5 * data.volas @ cp.abs(w)
    risk_wc = cp.norm2(cp.hstack([risk, risk_uncertainty]))

    asset_holding_cost = data.kappa_short @ cp.pos(-w)
@@ -97,24 +96,20 @@ def markowitz(data: Data, param: Parameters) -> tuple[np.ndarray, float, cp.Problem]:
    trading_cost = spread_cost + impact_cost

    objective = (
-        return_wc
-        - param.gamma_risk * cp.pos(risk_wc - param.risk_target)
-        - param.gamma_hold * holding_cost
-        - param.gamma_trade * trading_cost
-        - param.gamma_turn * cp.pos(T - param.T_max)
+        return_wc - param.gamma_hold * holding_cost - param.gamma_trade * trading_cost
    )

    constraints = [
        cp.sum(w) + c == 1,
-        c == data.c_prev - cp.sum(z),
-        param.c_lower <= c,
-        c <= param.c_upper,
-        param.w_lower <= w,
-        w <= param.w_upper,
-        param.z_lower <= z,
-        z <= param.z_upper,
-        L <= param.L_max,
-        T <= param.T_max,
+        param.w_min <= w,
+        w <= param.w_max,
+        L <= param.L_tar,
+        param.c_min <= c,
+        c <= param.c_max,
+        param.z_min <= z,
+        z <= param.z_max,
+        T <= param.T_tar,
+        risk_wc <= param.risk_target,
    ]
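Note: this hunk trades soft penalties for hard constraints. The risk and turnover terms leave the objective (the gamma_risk and gamma_turn penalties are gone) and reappear as risk_wc <= param.risk_target and T <= param.T_tar, and the explicit cash-accounting equality c == data.c_prev - cp.sum(z) is replaced by bounds on c. The two formulations are related but not interchangeable: a penalty prices violations, a constraint forbids them. A standalone sketch of the distinction (hypothetical toy problem, not the repo's code):

    import cvxpy as cp
    import numpy as np

    mu, sigma, target = np.array([0.05, 0.02]), np.array([0.2, 0.1]), 0.12
    w = cp.Variable(2, nonneg=True)
    risk = cp.norm2(cp.multiply(sigma, w))

    # Soft penalty: exceeding the risk target is allowed but charged.
    gamma = 10.0
    soft = cp.Problem(
        cp.Maximize(mu @ w - gamma * cp.pos(risk - target)), [cp.sum(w) == 1]
    )

    # Hard constraint: the risk target can never be exceeded.
    hard = cp.Problem(cp.Maximize(mu @ w), [cp.sum(w) == 1, risk <= target])

    soft.solve()
    hard.solve()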

    problem = cp.Problem(cp.Maximize(objective), constraints)
@@ -130,7 +125,6 @@ def markowitz(data: Data, param: Parameters) -> tuple[np.ndarray, float, cp.Problem]:
    n_assets = 10
    data = Data(
        w_prev=np.ones(n_assets) / n_assets,
-        c_prev=0.0,
        idio_mean=np.zeros(n_assets),
        factor_mean=np.zeros(n_assets),
        risk_free=0.0,
@@ -144,14 +138,14 @@ def markowitz(data: Data, param: Parameters) -> tuple[np.ndarray, float, cp.Problem]:
    )

    param = Parameters(
-        w_lower=np.zeros(n_assets),
-        w_upper=np.ones(n_assets),
-        c_lower=0.0,
-        c_upper=1.0,
-        z_lower=-np.ones(n_assets),
-        z_upper=np.ones(n_assets),
-        T_max=1.0,
-        L_max=1.0,
+        w_min=np.zeros(n_assets),
+        w_max=np.ones(n_assets),
+        c_min=0.0,
+        c_max=1.0,
+        z_min=-np.ones(n_assets),
+        z_max=np.ones(n_assets),
+        T_tar=1.0,
+        L_tar=1.0,
        rho_mean=np.zeros(n_assets),
        rho_covariance=0.0,
        gamma_hold=0.0,
@@ -161,5 +155,5 @@ def markowitz(data: Data, param: Parameters) -> tuple[np.ndarray, float, cp.Problem]:
        risk_target=0.0,
    )

-    w, c = markowitz(data, param)
+    w, c, _ = markowitz(data, param)
    print(w, c)
(Diffs for the remaining four changed files did not load.)