From da80f7dc66a86a4a958064c4a76bab9738b5b872 Mon Sep 17 00:00:00 2001 From: Kasper Johansson Date: Sun, 22 Oct 2023 16:49:06 -0700 Subject: [PATCH 01/10] typo --- experiments/taming.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/experiments/taming.py b/experiments/taming.py index 44e09e1..8435e15 100644 --- a/experiments/taming.py +++ b/experiments/taming.py @@ -20,7 +20,7 @@ def unconstrained_markowitz(inputs: OptimizationInput) -> np.ndarray: chol = np.linalg.cholesky(Sigma) constraints = [ cp.sum(w) + c == 1, - cp.norm2(chol @ w) <= inputs.risk_target, + cp.norm2(chol.T @ w) <= inputs.risk_target, ] problem = cp.Problem(cp.Maximize(objective), constraints) problem.solve(get_solver()) From 6e42f6d71d8998de09c60540f13b64a92051abca Mon Sep 17 00:00:00 2001 From: Kasper Johansson Date: Sun, 22 Oct 2023 17:15:01 -0700 Subject: [PATCH 02/10] synthetic returns --- experiments/backtest.py | 3 +- experiments/playgroung.ipynb | 492 +++++++++++++++++++++++++++++++++++ experiments/utils.py | 8 + 3 files changed, 502 insertions(+), 1 deletion(-) create mode 100644 experiments/playgroung.ipynb create mode 100644 experiments/utils.py diff --git a/experiments/backtest.py b/experiments/backtest.py index 3a3017e..1ba5858 100644 --- a/experiments/backtest.py +++ b/experiments/backtest.py @@ -6,6 +6,7 @@ from typing import Callable import numpy as np import pandas as pd +from utils import synthetic_returns # hack to allow importing from parent directory without having a package sys.path.append(str(Path(__file__).parent.parent)) @@ -61,7 +62,7 @@ def run_backtest( post_trade_cash = [] post_trade_quantities = [] - returns = prices.pct_change().dropna() + returns = synthetic_returns(prices).dropna() means = returns.ewm(halflife=125).mean() covariance_df = returns.ewm(halflife=125).cov() days = returns.index diff --git a/experiments/playgroung.ipynb b/experiments/playgroung.ipynb new file mode 100644 index 0000000..12e1f15 --- /dev/null +++ b/experiments/playgroung.ipynb @@ -0,0 +1,492 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from utils import *\n", + "import pandas as pd\n", + "\n", + "# autoreload \n", + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "prices = pd.read_csv(\"../data/prices.csv\", index_col=0, parse_dates=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0.1414213562373095" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.sqrt(0.02)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "returns = synthetic_returns(prices)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "returns2 = prices.pct_change()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[1. , 0.14747197],\n", + " [0.14747197, 1. ]])" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.corrcoef(returns2.dropna().values.flatten(), returns.dropna().values.flatten())" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
T0T1T3T4T5T6T7T8T9T10...T89T90T92T93T94T95T96T98T99T100
2000-01-04NaNNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaNNaNNaNNaNNaNNaNNaNNaNNaNNaN
2000-01-05-0.0014540.0003800.0059090.003457-0.005594-0.0056610.0068960.0007600.0019630.007932...0.007658-0.003752-0.001921-0.001777-0.0005610.002421-0.000910-0.0050580.0055280.001457
2000-01-06-0.005067-0.0016870.000715-0.000934-0.0003320.0014530.002840-0.000088-0.000333-0.001227...-0.004984-0.0021190.002004-0.0018330.0026700.001595-0.0053300.0020260.003262-0.006554
2000-01-070.005604-0.000995-0.000164-0.004608-0.008584-0.0003060.004896-0.0019710.0017720.000841...0.0057920.0011420.0047150.0001730.0033280.0015180.009072-0.0044460.001961-0.004652
2000-01-100.0010930.0010360.0018080.000430-0.001655-0.001340-0.0062310.007149-0.005827-0.003586...-0.0047410.0038900.0004240.0025440.002860-0.0028890.001497-0.003636-0.0009240.003583
..................................................................
2023-09-200.0006720.0033780.003179-0.0057610.0034750.001039-0.000845-0.002282-0.003364-0.002279...-0.0017700.0022490.002883-0.003437-0.0055060.0026030.001204-0.0031500.0036820.000884
2023-09-21-0.0040140.000238-0.0023010.000503-0.0025540.001577-0.0003180.0073580.000760-0.005215...-0.0021960.009500-0.003946-0.000513-0.002119-0.0013170.0012330.0011150.0012660.001024
2023-09-22-0.0019840.003389-0.0039660.0055490.002383-0.0053220.000990-0.0006380.0025610.003347...-0.0022770.003851-0.000812-0.000530-0.005350-0.006388-0.0043310.0002080.003962-0.001164
2023-09-250.001255-0.0004910.000934-0.0034860.0028230.0009360.0011780.0026760.000966-0.000768...-0.002975-0.0024040.005076-0.001225-0.002615-0.0015260.0060410.0014510.0012800.001500
2023-09-26-0.002724-0.0033380.0019980.0018340.0001550.000516-0.0032930.003096-0.004128-0.001218...0.004620-0.002510-0.0035030.008428-0.001012-0.000826-0.000012-0.0000550.0009070.003052
\n", + "

6191 rows × 75 columns

\n", + "
" + ], + "text/plain": [ + " T0 T1 T3 T4 T5 T6 \\\n", + "2000-01-04 NaN NaN NaN NaN NaN NaN \n", + "2000-01-05 -0.001454 0.000380 0.005909 0.003457 -0.005594 -0.005661 \n", + "2000-01-06 -0.005067 -0.001687 0.000715 -0.000934 -0.000332 0.001453 \n", + "2000-01-07 0.005604 -0.000995 -0.000164 -0.004608 -0.008584 -0.000306 \n", + "2000-01-10 0.001093 0.001036 0.001808 0.000430 -0.001655 -0.001340 \n", + "... ... ... ... ... ... ... \n", + "2023-09-20 0.000672 0.003378 0.003179 -0.005761 0.003475 0.001039 \n", + "2023-09-21 -0.004014 0.000238 -0.002301 0.000503 -0.002554 0.001577 \n", + "2023-09-22 -0.001984 0.003389 -0.003966 0.005549 0.002383 -0.005322 \n", + "2023-09-25 0.001255 -0.000491 0.000934 -0.003486 0.002823 0.000936 \n", + "2023-09-26 -0.002724 -0.003338 0.001998 0.001834 0.000155 0.000516 \n", + "\n", + " T7 T8 T9 T10 ... T89 T90 \\\n", + "2000-01-04 NaN NaN NaN NaN ... NaN NaN \n", + "2000-01-05 0.006896 0.000760 0.001963 0.007932 ... 0.007658 -0.003752 \n", + "2000-01-06 0.002840 -0.000088 -0.000333 -0.001227 ... -0.004984 -0.002119 \n", + "2000-01-07 0.004896 -0.001971 0.001772 0.000841 ... 0.005792 0.001142 \n", + "2000-01-10 -0.006231 0.007149 -0.005827 -0.003586 ... -0.004741 0.003890 \n", + "... ... ... ... ... ... ... ... \n", + "2023-09-20 -0.000845 -0.002282 -0.003364 -0.002279 ... -0.001770 0.002249 \n", + "2023-09-21 -0.000318 0.007358 0.000760 -0.005215 ... -0.002196 0.009500 \n", + "2023-09-22 0.000990 -0.000638 0.002561 0.003347 ... -0.002277 0.003851 \n", + "2023-09-25 0.001178 0.002676 0.000966 -0.000768 ... -0.002975 -0.002404 \n", + "2023-09-26 -0.003293 0.003096 -0.004128 -0.001218 ... 0.004620 -0.002510 \n", + "\n", + " T92 T93 T94 T95 T96 T98 \\\n", + "2000-01-04 NaN NaN NaN NaN NaN NaN \n", + "2000-01-05 -0.001921 -0.001777 -0.000561 0.002421 -0.000910 -0.005058 \n", + "2000-01-06 0.002004 -0.001833 0.002670 0.001595 -0.005330 0.002026 \n", + "2000-01-07 0.004715 0.000173 0.003328 0.001518 0.009072 -0.004446 \n", + "2000-01-10 0.000424 0.002544 0.002860 -0.002889 0.001497 -0.003636 \n", + "... ... ... ... ... ... ... \n", + "2023-09-20 0.002883 -0.003437 -0.005506 0.002603 0.001204 -0.003150 \n", + "2023-09-21 -0.003946 -0.000513 -0.002119 -0.001317 0.001233 0.001115 \n", + "2023-09-22 -0.000812 -0.000530 -0.005350 -0.006388 -0.004331 0.000208 \n", + "2023-09-25 0.005076 -0.001225 -0.002615 -0.001526 0.006041 0.001451 \n", + "2023-09-26 -0.003503 0.008428 -0.001012 -0.000826 -0.000012 -0.000055 \n", + "\n", + " T99 T100 \n", + "2000-01-04 NaN NaN \n", + "2000-01-05 0.005528 0.001457 \n", + "2000-01-06 0.003262 -0.006554 \n", + "2000-01-07 0.001961 -0.004652 \n", + "2000-01-10 -0.000924 0.003583 \n", + "... ... ... 
\n", + "2023-09-20 0.003682 0.000884 \n", + "2023-09-21 0.001266 0.001024 \n", + "2023-09-22 0.003962 -0.001164 \n", + "2023-09-25 0.001280 0.001500 \n", + "2023-09-26 0.000907 0.003052 \n", + "\n", + "[6191 rows x 75 columns]" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "returns" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/experiments/utils.py b/experiments/utils.py new file mode 100644 index 0000000..a62ab64 --- /dev/null +++ b/experiments/utils.py @@ -0,0 +1,8 @@ +import numpy as np + + +def synthetic_returns(prices, sigma_r=0.02236, sigma_eps=0.14142): + returns = prices.pct_change() + + alpha = sigma_r**2 / (sigma_r**2 + sigma_eps**2) + return alpha * (returns + np.random.normal(size=returns.shape) * sigma_eps) From abfba1901a7120d09468729fea786fb7e6b08e53 Mon Sep 17 00:00:00 2001 From: Kasper Johansson Date: Sun, 22 Oct 2023 17:15:24 -0700 Subject: [PATCH 03/10] removed playground --- experiments/playgroung.ipynb | 492 ----------------------------------- 1 file changed, 492 deletions(-) delete mode 100644 experiments/playgroung.ipynb diff --git a/experiments/playgroung.ipynb b/experiments/playgroung.ipynb deleted file mode 100644 index 12e1f15..0000000 --- a/experiments/playgroung.ipynb +++ /dev/null @@ -1,492 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "from utils import *\n", - "import pandas as pd\n", - "\n", - "# autoreload \n", - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "prices = pd.read_csv(\"../data/prices.csv\", index_col=0, parse_dates=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0.1414213562373095" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.sqrt(0.02)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "returns = synthetic_returns(prices)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "returns2 = prices.pct_change()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[1. , 0.14747197],\n", - " [0.14747197, 1. ]])" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.corrcoef(returns2.dropna().values.flatten(), returns.dropna().values.flatten())" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
T0T1T3T4T5T6T7T8T9T10...T89T90T92T93T94T95T96T98T99T100
2000-01-04NaNNaNNaNNaNNaNNaNNaNNaNNaNNaN...NaNNaNNaNNaNNaNNaNNaNNaNNaNNaN
2000-01-05-0.0014540.0003800.0059090.003457-0.005594-0.0056610.0068960.0007600.0019630.007932...0.007658-0.003752-0.001921-0.001777-0.0005610.002421-0.000910-0.0050580.0055280.001457
2000-01-06-0.005067-0.0016870.000715-0.000934-0.0003320.0014530.002840-0.000088-0.000333-0.001227...-0.004984-0.0021190.002004-0.0018330.0026700.001595-0.0053300.0020260.003262-0.006554
2000-01-070.005604-0.000995-0.000164-0.004608-0.008584-0.0003060.004896-0.0019710.0017720.000841...0.0057920.0011420.0047150.0001730.0033280.0015180.009072-0.0044460.001961-0.004652
2000-01-100.0010930.0010360.0018080.000430-0.001655-0.001340-0.0062310.007149-0.005827-0.003586...-0.0047410.0038900.0004240.0025440.002860-0.0028890.001497-0.003636-0.0009240.003583
..................................................................
2023-09-200.0006720.0033780.003179-0.0057610.0034750.001039-0.000845-0.002282-0.003364-0.002279...-0.0017700.0022490.002883-0.003437-0.0055060.0026030.001204-0.0031500.0036820.000884
2023-09-21-0.0040140.000238-0.0023010.000503-0.0025540.001577-0.0003180.0073580.000760-0.005215...-0.0021960.009500-0.003946-0.000513-0.002119-0.0013170.0012330.0011150.0012660.001024
2023-09-22-0.0019840.003389-0.0039660.0055490.002383-0.0053220.000990-0.0006380.0025610.003347...-0.0022770.003851-0.000812-0.000530-0.005350-0.006388-0.0043310.0002080.003962-0.001164
2023-09-250.001255-0.0004910.000934-0.0034860.0028230.0009360.0011780.0026760.000966-0.000768...-0.002975-0.0024040.005076-0.001225-0.002615-0.0015260.0060410.0014510.0012800.001500
2023-09-26-0.002724-0.0033380.0019980.0018340.0001550.000516-0.0032930.003096-0.004128-0.001218...0.004620-0.002510-0.0035030.008428-0.001012-0.000826-0.000012-0.0000550.0009070.003052
\n", - "

6191 rows × 75 columns

\n", - "
" - ], - "text/plain": [ - " T0 T1 T3 T4 T5 T6 \\\n", - "2000-01-04 NaN NaN NaN NaN NaN NaN \n", - "2000-01-05 -0.001454 0.000380 0.005909 0.003457 -0.005594 -0.005661 \n", - "2000-01-06 -0.005067 -0.001687 0.000715 -0.000934 -0.000332 0.001453 \n", - "2000-01-07 0.005604 -0.000995 -0.000164 -0.004608 -0.008584 -0.000306 \n", - "2000-01-10 0.001093 0.001036 0.001808 0.000430 -0.001655 -0.001340 \n", - "... ... ... ... ... ... ... \n", - "2023-09-20 0.000672 0.003378 0.003179 -0.005761 0.003475 0.001039 \n", - "2023-09-21 -0.004014 0.000238 -0.002301 0.000503 -0.002554 0.001577 \n", - "2023-09-22 -0.001984 0.003389 -0.003966 0.005549 0.002383 -0.005322 \n", - "2023-09-25 0.001255 -0.000491 0.000934 -0.003486 0.002823 0.000936 \n", - "2023-09-26 -0.002724 -0.003338 0.001998 0.001834 0.000155 0.000516 \n", - "\n", - " T7 T8 T9 T10 ... T89 T90 \\\n", - "2000-01-04 NaN NaN NaN NaN ... NaN NaN \n", - "2000-01-05 0.006896 0.000760 0.001963 0.007932 ... 0.007658 -0.003752 \n", - "2000-01-06 0.002840 -0.000088 -0.000333 -0.001227 ... -0.004984 -0.002119 \n", - "2000-01-07 0.004896 -0.001971 0.001772 0.000841 ... 0.005792 0.001142 \n", - "2000-01-10 -0.006231 0.007149 -0.005827 -0.003586 ... -0.004741 0.003890 \n", - "... ... ... ... ... ... ... ... \n", - "2023-09-20 -0.000845 -0.002282 -0.003364 -0.002279 ... -0.001770 0.002249 \n", - "2023-09-21 -0.000318 0.007358 0.000760 -0.005215 ... -0.002196 0.009500 \n", - "2023-09-22 0.000990 -0.000638 0.002561 0.003347 ... -0.002277 0.003851 \n", - "2023-09-25 0.001178 0.002676 0.000966 -0.000768 ... -0.002975 -0.002404 \n", - "2023-09-26 -0.003293 0.003096 -0.004128 -0.001218 ... 0.004620 -0.002510 \n", - "\n", - " T92 T93 T94 T95 T96 T98 \\\n", - "2000-01-04 NaN NaN NaN NaN NaN NaN \n", - "2000-01-05 -0.001921 -0.001777 -0.000561 0.002421 -0.000910 -0.005058 \n", - "2000-01-06 0.002004 -0.001833 0.002670 0.001595 -0.005330 0.002026 \n", - "2000-01-07 0.004715 0.000173 0.003328 0.001518 0.009072 -0.004446 \n", - "2000-01-10 0.000424 0.002544 0.002860 -0.002889 0.001497 -0.003636 \n", - "... ... ... ... ... ... ... \n", - "2023-09-20 0.002883 -0.003437 -0.005506 0.002603 0.001204 -0.003150 \n", - "2023-09-21 -0.003946 -0.000513 -0.002119 -0.001317 0.001233 0.001115 \n", - "2023-09-22 -0.000812 -0.000530 -0.005350 -0.006388 -0.004331 0.000208 \n", - "2023-09-25 0.005076 -0.001225 -0.002615 -0.001526 0.006041 0.001451 \n", - "2023-09-26 -0.003503 0.008428 -0.001012 -0.000826 -0.000012 -0.000055 \n", - "\n", - " T99 T100 \n", - "2000-01-04 NaN NaN \n", - "2000-01-05 0.005528 0.001457 \n", - "2000-01-06 0.003262 -0.006554 \n", - "2000-01-07 0.001961 -0.004652 \n", - "2000-01-10 -0.000924 0.003583 \n", - "... ... ... 
\n", - "2023-09-20 0.003682 0.000884 \n", - "2023-09-21 0.001266 0.001024 \n", - "2023-09-22 0.003962 -0.001164 \n", - "2023-09-25 0.001280 0.001500 \n", - "2023-09-26 0.000907 0.003052 \n", - "\n", - "[6191 rows x 75 columns]" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "returns" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.12" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} From b917d94abb8c799b2dd93bcb6eaeff6ef8b9ebe3 Mon Sep 17 00:00:00 2001 From: Kasper Johansson Date: Mon, 23 Oct 2023 09:13:27 -0700 Subject: [PATCH 04/10] merged long-only and unconstrained --- experiments/taming.py | 48 +++++++++++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 18 deletions(-) diff --git a/experiments/taming.py b/experiments/taming.py index 8435e15..69dbfdd 100644 --- a/experiments/taming.py +++ b/experiments/taming.py @@ -7,14 +7,20 @@ import matplotlib.pyplot as plt -def unconstrained_markowitz(inputs: OptimizationInput) -> np.ndarray: +def unconstrained_markowitz( + inputs: OptimizationInput, long_only: bool = False +) -> np.ndarray: """Compute the unconstrained Markowitz portfolio weights.""" n_assets = inputs.prices.shape[1] mu, Sigma = inputs.mean.values, inputs.covariance.values - w = cp.Variable(n_assets) - c = cp.Variable() + if long_only: + w = cp.Variable(n_assets, nonneg=True) + c = cp.Variable(nonneg=True) + else: + w = cp.Variable(n_assets) + c = cp.Variable() objective = mu @ w chol = np.linalg.cholesky(Sigma) @@ -22,6 +28,7 @@ def unconstrained_markowitz(inputs: OptimizationInput) -> np.ndarray: cp.sum(w) + c == 1, cp.norm2(chol.T @ w) <= inputs.risk_target, ] + problem = cp.Problem(cp.Maximize(objective), constraints) problem.solve(get_solver()) assert problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE} @@ -30,21 +37,26 @@ def unconstrained_markowitz(inputs: OptimizationInput) -> np.ndarray: def long_only_markowitz(inputs: OptimizationInput) -> np.ndarray: """Compute the long-only Markowitz portfolio weights.""" - n_assets = inputs.prices.shape[1] - - mu, Sigma = inputs.mean.values, inputs.covariance.values - - w = cp.Variable(n_assets, nonneg=True) - c = cp.Variable(nonneg=True) - objective = mu @ w - constraints = [ - cp.sum(w) + c == 1, - cp.quad_form(w, Sigma, assume_PSD=True) <= inputs.risk_target**2, - ] - problem = cp.Problem(cp.Maximize(objective), constraints) - problem.solve(get_solver()) - assert problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE} - return w.value, c.value + return unconstrained_markowitz(inputs, long_only=True) + + +# def long_only_markowitz(inputs: OptimizationInput) -> np.ndarray: +# """Compute the long-only Markowitz portfolio weights.""" +# n_assets = inputs.prices.shape[1] + +# mu, Sigma = inputs.mean.values, inputs.covariance.values + +# w = cp.Variable(n_assets, nonneg=True) +# c = cp.Variable(nonneg=True) +# objective = mu @ w +# constraints = [ +# cp.sum(w) + c == 1, +# cp.quad_form(w, Sigma, assume_PSD=True) <= inputs.risk_target**2, +# ] +# problem = cp.Problem(cp.Maximize(objective), constraints) +# problem.solve(get_solver()) +# assert problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE} +# return w.value, 
c.value def equal_weights(inputs: OptimizationInput) -> np.ndarray: From bd4a0d05739a5918fc22ebb195ba06571f6f3540 Mon Sep 17 00:00:00 2001 From: Kasper Johansson Date: Mon, 23 Oct 2023 12:19:08 -0700 Subject: [PATCH 05/10] remove uncommented function --- experiments/taming.py | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/experiments/taming.py b/experiments/taming.py index 69dbfdd..7cf5cdf 100644 --- a/experiments/taming.py +++ b/experiments/taming.py @@ -40,25 +40,6 @@ def long_only_markowitz(inputs: OptimizationInput) -> np.ndarray: return unconstrained_markowitz(inputs, long_only=True) -# def long_only_markowitz(inputs: OptimizationInput) -> np.ndarray: -# """Compute the long-only Markowitz portfolio weights.""" -# n_assets = inputs.prices.shape[1] - -# mu, Sigma = inputs.mean.values, inputs.covariance.values - -# w = cp.Variable(n_assets, nonneg=True) -# c = cp.Variable(nonneg=True) -# objective = mu @ w -# constraints = [ -# cp.sum(w) + c == 1, -# cp.quad_form(w, Sigma, assume_PSD=True) <= inputs.risk_target**2, -# ] -# problem = cp.Problem(cp.Maximize(objective), constraints) -# problem.solve(get_solver()) -# assert problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE} -# return w.value, c.value - - def equal_weights(inputs: OptimizationInput) -> np.ndarray: """Compute the equal weights portfolio.""" n_assets = inputs.prices.shape[1] From 329830614e9806dad0f41dde98750193f1d44e30 Mon Sep 17 00:00:00 2001 From: phschiele Date: Tue, 24 Oct 2023 00:58:00 +0200 Subject: [PATCH 06/10] Start preparing table for taming --- experiments/taming.py | 92 ++++++++++++++++++++----------------------- 1 file changed, 43 insertions(+), 49 deletions(-) diff --git a/experiments/taming.py b/experiments/taming.py index 7cf5cdf..5a7a526 100644 --- a/experiments/taming.py +++ b/experiments/taming.py @@ -10,7 +10,7 @@ def unconstrained_markowitz( inputs: OptimizationInput, long_only: bool = False ) -> np.ndarray: - """Compute the unconstrained Markowitz portfolio weights.""" + """Compute the unconstrained (or long-only) Markowitz portfolio weights.""" n_assets = inputs.prices.shape[1] mu, Sigma = inputs.mean.values, inputs.covariance.values @@ -108,71 +108,65 @@ def get_parameters(data, risk_target): ) -def main(from_checkpoint: bool = False): +def main(from_checkpoint: bool = True): if from_checkpoint: - unconstrained_results = [] - for f in [ + unconstrained_files = [ f for f in os.listdir("checkpoints") if f.startswith("unconstrained") - ]: - unconstrained_results.append(BacktestResult.load(f"checkpoints/{f}")) + ] + assert len(unconstrained_files) == 1 + unconstrained_result = BacktestResult.load( + f"checkpoints/{unconstrained_files[0]}" + ) + + long_only_files = [ + f for f in os.listdir("checkpoints") if f.startswith("long_only") + ] + assert len(long_only_files) == 1 + long_only_result = BacktestResult.load(f"checkpoints/{long_only_files[0]}") + equal_weights_results = BacktestResult.load("checkpoints/equal_weights.pickle") else: equal_weights_results = run_backtest(equal_weights, 0.0, verbose=True) equal_weights_results.save("checkpoints/equal_weights.pickle") adjustment_factor = np.sqrt(equal_weights_results.periods_per_year) - sigma_targets = np.array([0.10]) / adjustment_factor - unconstrained_results = [] - for sigma_target in sigma_targets: - result = run_backtest(unconstrained_markowitz, sigma_target, verbose=True) - result.save( - f"checkpoints/unconstrained_{result.risk_target * adjustment_factor:.2f}.pickle" - ) - unconstrained_results.append(result) - - 
long_only_results = [] - for sigma_target in sigma_targets: - result = run_backtest(long_only_markowitz, sigma_target, verbose=True) - result.save( - f"checkpoints/long_only_{result.risk_target * adjustment_factor:.2f}.pickle" - ) - long_only_results.append(result) - - generate_table(equal_weights_results, unconstrained_results, long_only_results) - plot_results(equal_weights_results, unconstrained_results, long_only_results) + annualized_target = 0.10 + sigma_target = annualized_target / adjustment_factor + + unconstrained_result = run_backtest( + unconstrained_markowitz, sigma_target, verbose=True + ) + unconstrained_result.save( + f"checkpoints/unconstrained_{annualized_target}.pickle" + ) + + long_only_result = run_backtest(long_only_markowitz, sigma_target, verbose=True) + long_only_result.save(f"checkpoints/long_only_{annualized_target}.pickle") + + generate_table(equal_weights_results, unconstrained_result, long_only_result) + plot_results(equal_weights_results, unconstrained_result, long_only_result) def generate_table( equal_weights_results: BacktestResult, - unconstrained_results: list[BacktestResult], - long_only_results: list[BacktestResult], + unconstrained_results: BacktestResult, + long_only_results: BacktestResult, ) -> None: # Table 1 df = pd.DataFrame( - index=["Equal weights"] - + [ - f"$\\sigma^\\text{{tar}} = {result.risk_target:.2f}$" - for result in unconstrained_results - ], + index=["Equal weights", "Unconstrained Markowitz", "Long-only Markowitz"], columns=["Mean return", "Volatility", "Sharpe", "Turnover", "Max leverage"], ) - df["Mean return"] = [equal_weights_results.mean_return] + [ - result.mean_return for result in unconstrained_results - ] - df["Volatility"] = [equal_weights_results.volatility] + [ - result.volatility for result in unconstrained_results - ] - df["Sharpe"] = [equal_weights_results.sharpe] + [ - result.sharpe for result in unconstrained_results - ] - df["Turnover"] = [equal_weights_results.turnover] + [ - result.turnover for result in unconstrained_results - ] - df["Max leverage"] = [ - equal_weights_results.asset_weights.abs().sum(axis=1).max() - ] + [ - result.asset_weights.abs().sum(axis=1).max() for result in unconstrained_results - ] + strategies = [equal_weights_results, unconstrained_results, long_only_results] + + df["Mean return"] = list(map(lambda x: x.mean_return, strategies)) + df["Volatility"] = list(map(lambda x: x.volatility, strategies)) + df["Sharpe"] = list(map(lambda x: x.sharpe, strategies)) + df["Turnover"] = list(map(lambda x: x.turnover, strategies)) + df["Max leverage"] = list( + map(lambda x: x.asset_weights.abs().sum(axis=1).max(), strategies) + ) + print(df.to_latex(float_format="%.2f")) # Table 2 From a355ead2942078c2ffd580f266e5d9c76a94eef3 Mon Sep 17 00:00:00 2001 From: Kasper Johansson Date: Tue, 24 Oct 2023 08:44:58 -0700 Subject: [PATCH 07/10] explained return predictions model --- experiments/utils.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/experiments/utils.py b/experiments/utils.py index a62ab64..cf435cb 100644 --- a/experiments/utils.py +++ b/experiments/utils.py @@ -1,8 +1,22 @@ import numpy as np -def synthetic_returns(prices, sigma_r=0.02236, sigma_eps=0.14142): +def synthetic_returns(prices, var_r=0.0005, var_eps=0.02): + """ + param prices: a DataFrame of prices + param var_r: the Gaussian variance of the returns + param var_eps: the Gaussian variance of the noise term + + returns: a DataFrame of "synthetic return predictions" computed as + 
alpha*(returns+noise), where alpha=var_r / (var_r + var_eps); this is the + coefficient that minimize the variance of the prediction error under the + above model. + + var_r = 0.0005 and var_eps = 0.02 correspond to an information ratio + sqrt(alpha) of about 0.15. + """ returns = prices.pct_change() - alpha = sigma_r**2 / (sigma_r**2 + sigma_eps**2) + alpha = var_r / (var_r + var_eps) + sigma_eps = np.sqrt(var_eps) return alpha * (returns + np.random.normal(size=returns.shape) * sigma_eps) From 8f2b28144fe81ee9522e03937020efd124e9e12c Mon Sep 17 00:00:00 2001 From: phschiele Date: Tue, 24 Oct 2023 23:55:06 +0200 Subject: [PATCH 08/10] Small updates --- experiments/backtest.py | 41 ++++++++++++++++++++++++++--------------- experiments/utils.py | 10 ++++++++-- 2 files changed, 34 insertions(+), 17 deletions(-) diff --git a/experiments/backtest.py b/experiments/backtest.py index 1ba5858..09f54f6 100644 --- a/experiments/backtest.py +++ b/experiments/backtest.py @@ -46,9 +46,8 @@ def run_backtest( ) -> tuple[pd.Series, pd.DataFrame]: """ Run a simplified backtest for a given strategy. - At time t we use data from t-500 to t-1 to forecast the data and - compute the optimal portfolio weights and cash holdings. - We then trade to these weights at time t. + At time t we use data from t-lookback to t to compute the optimal portfolio + weights and then execute the trades at time t. """ prices, spread, volume, rf = load_data() @@ -62,25 +61,28 @@ def run_backtest( post_trade_cash = [] post_trade_quantities = [] - returns = synthetic_returns(prices).dropna() - means = returns.ewm(halflife=125).mean() - covariance_df = returns.ewm(halflife=125).cov() + returns = prices.pct_change().dropna() + means = ( + synthetic_returns(prices).shift(-1).dropna() + ) # At time t includes data up to t+1 + covariance_df = returns.ewm(halflife=125).cov() # At time t includes data up to t days = returns.index covariances = {} for day in days: covariances[day] = covariance_df.loc[day] - for t in range(lookback, len(prices)): + for t in range(lookback, len(prices) - 1): day = prices.index[t] if verbose: print(f"Day {t} of {len(prices)-1}, {day}") - prices_t = prices.iloc[t - lookback : t] # Up to t-1 - spread_t = spread.iloc[t - lookback : t] - volume_t = volume.iloc[t - lookback : t] - mean_t = means.loc[day] - covariance_t = covariances[day] + prices_t = prices.iloc[t - lookback : t + 1] # Up to t + spread_t = spread.iloc[t - lookback : t + 1] + volume_t = volume.iloc[t - lookback : t + 1] + + mean_t = means.loc[day] # Forecast for return t to t+1 + covariance_t = covariances[day] # Forecast for covariance t to t+1 inputs_t = OptimizationInput( prices_t, @@ -108,9 +110,9 @@ def run_backtest( post_trade_cash.append(cash) post_trade_quantities.append(quantities) - post_trade_cash = pd.Series(post_trade_cash, index=prices.index[lookback:]) + post_trade_cash = pd.Series(post_trade_cash, index=prices.index[lookback:-1]) post_trade_quantities = pd.DataFrame( - post_trade_quantities, index=prices.index[lookback:], columns=prices.columns + post_trade_quantities, index=prices.index[lookback:-1], columns=prices.columns ) return BacktestResult(post_trade_cash, post_trade_quantities, risk_target) @@ -236,4 +238,13 @@ def load(path: Path) -> "BacktestResult": n_assets = load_data()[0].shape[1] w_targets = np.ones(n_assets) / (n_assets + 1) c_target = 1 / (n_assets + 1) - run_backtest(lambda _inputs: (w_targets, c_target), risk_target=0.0, verbose=True) + result = run_backtest( + lambda _inputs: (w_targets, c_target), 
risk_target=0.0, verbose=True + ) + print( + f"Mean return: {result.mean_return:.2%},\n" + f"Volatility: {result.volatility:.2%},\n" + f"Sharpe: {result.sharpe:.2f},\n" + f"Turnover: {result.turnover:.2f},\n" + f"Max leverage: {result.max_leverage:.2f}" + ) diff --git a/experiments/utils.py b/experiments/utils.py index cf435cb..d49a1e2 100644 --- a/experiments/utils.py +++ b/experiments/utils.py @@ -1,7 +1,10 @@ import numpy as np +import pandas as pd -def synthetic_returns(prices, var_r=0.0005, var_eps=0.02): +def synthetic_returns( + prices: pd.DataFrame, var_r: float = 0.0005, var_eps: float = 0.02 +) -> pd.DataFrame: """ param prices: a DataFrame of prices param var_r: the Gaussian variance of the returns @@ -19,4 +22,7 @@ def synthetic_returns(prices, var_r=0.0005, var_eps=0.02): alpha = var_r / (var_r + var_eps) sigma_eps = np.sqrt(var_eps) - return alpha * (returns + np.random.normal(size=returns.shape) * sigma_eps) + synthetic_returns = alpha * ( + returns + np.random.normal(size=returns.shape) * sigma_eps + ) + return synthetic_returns From 3a907af1202adcdc134e3042b40e59edc129955e Mon Sep 17 00:00:00 2001 From: phschiele Date: Wed, 25 Oct 2023 00:58:39 +0200 Subject: [PATCH 09/10] Allow solver to fail --- experiments/backtest.py | 3 +++ experiments/taming.py | 51 +++++++++++++++++------------------------ 2 files changed, 24 insertions(+), 30 deletions(-) diff --git a/experiments/backtest.py b/experiments/backtest.py index 09f54f6..8f5b20a 100644 --- a/experiments/backtest.py +++ b/experiments/backtest.py @@ -199,6 +199,9 @@ def asset_weights(self): @property def turnover(self) -> float: + """ + Note that turnover here includes weight changes due to price changes. + """ return ( self.asset_weights.diff().abs().sum(axis=1).mean() * self.periods_per_year ) diff --git a/experiments/taming.py b/experiments/taming.py index 5a7a526..51bb2a1 100644 --- a/experiments/taming.py +++ b/experiments/taming.py @@ -1,3 +1,4 @@ +import logging import os import numpy as np import pandas as pd @@ -28,11 +29,21 @@ def unconstrained_markowitz( cp.sum(w) + c == 1, cp.norm2(chol.T @ w) <= inputs.risk_target, ] + if not long_only: + constraints.append(c == 0) problem = cp.Problem(cp.Maximize(objective), constraints) problem.solve(get_solver()) - assert problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE} - return w.value, c.value + if problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE}: + return w.value, c.value + else: + cash = inputs.cash + quantities = inputs.quantities + portfolio_value = cash + quantities @ inputs.prices.iloc[-1] + valuations = quantities * inputs.prices.iloc[-1] + w, c = valuations / portfolio_value, cash / portfolio_value + logging.warning(f"Problem status: {problem.status}, returning previous weights") + return w, c def long_only_markowitz(inputs: OptimizationInput) -> np.ndarray: @@ -108,7 +119,7 @@ def get_parameters(data, risk_target): ) -def main(from_checkpoint: bool = True): +def main(from_checkpoint: bool = False): if from_checkpoint: unconstrained_files = [ f for f in os.listdir("checkpoints") if f.startswith("unconstrained") @@ -130,7 +141,7 @@ def main(from_checkpoint: bool = True): equal_weights_results.save("checkpoints/equal_weights.pickle") adjustment_factor = np.sqrt(equal_weights_results.periods_per_year) - annualized_target = 0.10 + annualized_target = 0.13 sigma_target = annualized_target / adjustment_factor unconstrained_result = run_backtest( @@ -144,7 +155,7 @@ def main(from_checkpoint: bool = True): 
long_only_result.save(f"checkpoints/long_only_{annualized_target}.pickle") generate_table(equal_weights_results, unconstrained_result, long_only_result) - plot_results(equal_weights_results, unconstrained_result, long_only_result) + # plot_results(equal_weights_results, unconstrained_result, long_only_result) def generate_table( @@ -159,31 +170,11 @@ def generate_table( ) strategies = [equal_weights_results, unconstrained_results, long_only_results] - df["Mean return"] = list(map(lambda x: x.mean_return, strategies)) - df["Volatility"] = list(map(lambda x: x.volatility, strategies)) - df["Sharpe"] = list(map(lambda x: x.sharpe, strategies)) - df["Turnover"] = list(map(lambda x: x.turnover, strategies)) - df["Max leverage"] = list( - map(lambda x: x.asset_weights.abs().sum(axis=1).max(), strategies) - ) - - print(df.to_latex(float_format="%.2f")) - - # Table 2 - df = pd.DataFrame( - index=[ - f"$\\sigma^\\text{{tar}} = {result.risk_target:.2f}$" - for result in long_only_results - ], - columns=["Mean return", "Volatility", "Sharpe", "Turnover", "Max leverage"], - ) - df["Mean return"] = [result.mean_return for result in long_only_results] - df["Volatility"] = [result.volatility for result in long_only_results] - df["Sharpe"] = [result.sharpe for result in long_only_results] - df["Turnover"] = [result.turnover for result in long_only_results] - df["Max leverage"] = [ - result.asset_weights.abs().sum(axis=1).max() for result in long_only_results - ] + df["Mean return"] = [result.mean_return for result in strategies] + df["Volatility"] = [result.volatility for result in strategies] + df["Sharpe"] = [result.sharpe for result in strategies] + df["Turnover"] = [result.turnover for result in strategies] + df["Max leverage"] = [result.max_leverage for result in strategies] print(df.to_latex(float_format="%.2f")) From b5f1d69aa170908818d96a4a1c44ec92be7bf333 Mon Sep 17 00:00:00 2001 From: phschiele Date: Wed, 25 Oct 2023 18:28:16 +0200 Subject: [PATCH 10/10] Remove nonneg cash constraint --- experiments/taming.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/experiments/taming.py b/experiments/taming.py index 51bb2a1..553e7a3 100644 --- a/experiments/taming.py +++ b/experiments/taming.py @@ -1,4 +1,3 @@ -import logging import os import numpy as np import pandas as pd @@ -29,21 +28,11 @@ def unconstrained_markowitz( cp.sum(w) + c == 1, cp.norm2(chol.T @ w) <= inputs.risk_target, ] - if not long_only: - constraints.append(c == 0) problem = cp.Problem(cp.Maximize(objective), constraints) problem.solve(get_solver()) - if problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE}: - return w.value, c.value - else: - cash = inputs.cash - quantities = inputs.quantities - portfolio_value = cash + quantities @ inputs.prices.iloc[-1] - valuations = quantities * inputs.prices.iloc[-1] - w, c = valuations / portfolio_value, cash / portfolio_value - logging.warning(f"Problem status: {problem.status}, returning previous weights") - return w, c + assert problem.status in {cp.OPTIMAL, cp.OPTIMAL_INACCURATE}, problem.status + return w.value, c.value def long_only_markowitz(inputs: OptimizationInput) -> np.ndarray: