Skip to content

Commit

Permalink
Merge pull request #4 from ryann-green/main
Browse files Browse the repository at this point in the history
updating branch
  • Loading branch information
ryann-green authored Jan 11, 2025
2 parents a9347d6 + f51e3a5 commit f5f52d7
Show file tree
Hide file tree
Showing 7 changed files with 19,127 additions and 18,910 deletions.
36,910 changes: 18,503 additions & 18,407 deletions predictions/backtest_results.csv

Large diffs are not rendered by default.

4 changes: 3 additions & 1 deletion predictions/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -320,7 +320,9 @@
# total_results.append(all_results)

increment_predictions(f'10_day_ahead_close/stock_performance/{date}/tickers','predictions/predictions_table.csv')
increment_non_trigger_evals((f'10_day_ahead_close/stock_performance/{date}/tickers','predictions/non_trigger_stocks.csv'))
increment_non_trigger_evals(f'10_day_ahead_close/stock_performance/{date}/tickers','predictions/non_trigger_stocks.csv')

# At this point we will run backtesting.py

# pd.concat(total_results).to_csv(f'{new_folder_path}/total_results.csv')

792 changes: 374 additions & 418 deletions predictions/non_trigger_stocks.csv

Large diffs are not rendered by default.

260 changes: 208 additions & 52 deletions predictions/predictions_table.csv

Large diffs are not rendered by default.

25 changes: 7 additions & 18 deletions predictions/utils/backtesting.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,23 +3,10 @@
import pandas as pd
import warnings
import time
from build_results import increment_backtest_results
from build_results import increment_results

warnings.filterwarnings("ignore")

# steps to operationalize
# 1. For stocks that don't meet the high or low triggers, add them to a separate csv with ticker and prediction date for future analysis
# 2. Check to see the latest date for prediction date for each stock in the Summary results csv file. Find the results in the total_results csv that are greater than the ticker+pred_date from the summary file, and use those values to start this price evaluation script.
# 3. Create different segments in sheets to evaluate success of stocks
# 4. Identify top stock to trade on a day of the week based on best chance of profitability within 15 days
#segment by best combination of
# day of week success
# overall success rate
# predicted higher success rate
# predicted higher boolean flag .. validation whether or not my prediction prices actually give me the best chance of success based on subsequent high and low prices
# 5. Based on suggestion above, Figure out stock to trade on next possible trading day based on the latest date in the "total results" csv file.


# Input data for multiple stocks
# stocks_data = pd.read_csv('total_results.csv')
stocks_data = pd.read_csv('predictions/non_trigger_stocks.csv')
Expand Down Expand Up @@ -121,7 +108,7 @@ def check_first_trigger(historical_data, ticker, buy_price, overall_success_rate
"profit_pct_per_stock": profit_pct
}
else:
print(f"{(h_data['ticker'])} has no trigger price as of {last_date}")
print(f"{(ticker)} from {(last_date)} has no trigger price as of 1/9/2025")

return None

Expand All @@ -141,6 +128,8 @@ def process_stocks(stocks_data, historical_data):
ticker=stock[1]
# print(ticker)

# if ticker in 'MA':

ticker_data = historical_data.loc[historical_data['ticker']==ticker]

# print(ticker_data)
Expand Down Expand Up @@ -190,13 +179,13 @@ def process_stocks(stocks_data, historical_data):
historical_data = fetch_historical_data(stocks_data)
processed_results,non_triggers = process_stocks(stocks_data, historical_data)
# print(processed_results)
increment_backtest_results('predictions/backtest_results.csv',processed_results)
results_df = pd.DataFrame(processed_results)
increment_results('predictions/backtest_results.csv',processed_results)
# print(results_df)
# results_df.to_csv('summary_results.csv', index=False)


# the below should get me a csv file of the tickers and dates that I'd need to check on the next run
# will need to add a clause that ignores ticker from X period in the past
non_triggers_df=pd.DataFrame(non_triggers)
# non_triggers_df.to_csv('non_trigger_stocks.csv', index=False)
# print(results_df)
non_triggers_df.to_csv('predictions/non_trigger_stocks.csv')
44 changes: 32 additions & 12 deletions predictions/utils/build_results.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,7 @@ def increment_predictions(target,table):

# run this every time the predictions script is run to add on to non_trigger_stocks for new predictions
def increment_non_trigger_evals (target,table):
print('Running increment_non_trigger_evals function')
folder = os.path.expanduser(f'~/Code/stock_predictions/{target}')

#get the DataFrame of the file that I want to append the results to for the predictions
Expand Down Expand Up @@ -158,7 +159,7 @@ def increment_non_trigger_evals (target,table):
if os.path.isfile(file_path):
if 'all_results' in file_path :

print(file_path)
# print(file_path)
try:
new_data = pd.read_csv(file_path)[['ticker'
,'latest_close'
Expand All @@ -167,7 +168,7 @@ def increment_non_trigger_evals (target,table):
,'last_date_for_prediction'
,'adj_prediction_price_w_high_inc'
,'adj_prediction_higher'
,'stop_loss '
,'stop_loss'
]]

new_data['buy_price']=new_data['latest_close']
Expand All @@ -186,7 +187,7 @@ def increment_non_trigger_evals (target,table):
, "adj_prediction_price_w_high_inc": "adj_prediction_price"
, "adj_prediction_higher": "adj_price_higher"
, "stop_loss ": "stop_loss"}, inplace=True)
print(new_data.columns)
# print(new_data.columns)


new_date=max(new_data['last_date'])
Expand All @@ -200,7 +201,7 @@ def increment_non_trigger_evals (target,table):
print(f"Length of new data: {len(new_data)}")
print(f"Length of combined data after concat {len(pd.concat([incremented_summary,new_data]))}")

pd.concat([incremented_summary,new_data]).to_csv(table)
pd.concat([incremented_summary,new_data]).reset_index(drop=True).to_csv(table)
# print(pd.concat([incremented_summary,new_data]))

# print(f'Non-Triggered Stocks updated with data from {new_date}')
Expand All @@ -222,15 +223,34 @@ def increment_non_trigger_evals (target,table):
return 'increment_non_trigger_evals'

# run this in backtesting to increment to existing backtest results table
def increment_backtest_results(old_results,new_results):
print('old results')
print(pd.read_csv(old_results))
def increment_results(old_results_path,new_results):
    """Append new backtest results onto the existing backtest results CSV.

    Parameters
    ----------
    old_results_path : str
        Path to the existing backtest results CSV. The file is read,
        extended with ``new_results``, and rewritten in place.
    new_results : list[dict] (or anything ``pd.DataFrame`` accepts)
        Newly produced backtest result rows to append.

    Returns
    -------
    str
        The literal marker string ``'incremented_results'``.
    """

    print('Running increment_results function')
    print('Incrementing Backtesting Results')
    print('Retrieving existing backtesting results')
    old_backtest_results = pd.read_csv(old_results_path)

    print('Retrieving new backtesting results')
    new_backtest_results = pd.DataFrame(new_results)

    # Concatenate once (the original concatenated a second time just to
    # measure the length). ignore_index avoids duplicate row labels from
    # the two source frames.
    combined = pd.concat([old_backtest_results, new_backtest_results],
                         ignore_index=True)

    print(f"Length of old table: {len(old_backtest_results)}")
    print(f"Length of new data: {len(new_backtest_results)}")
    print(f"Length of combined data after concat {len(combined)}")

    # index=False is essential here: without it, each append cycle writes
    # the pandas index as an unnamed column, which the next read_csv picks
    # up as data, so spurious "Unnamed: 0" columns accumulate run after run.
    combined.to_csv(old_results_path, index=False)

    return 'incremented_results'

def print_incremented_summary (old_results,new_results):
    """Report completion of the summary-statistics increment step.

    NOTE(review): neither parameter is used yet — this looks like a stub
    kept for interface symmetry with increment_results. A stale duplicate
    return (left over from the increment_backtest_results rename in this
    merge) made the intended return unreachable; it has been removed.

    Returns
    -------
    str
        Fixed status string ``"Incremented summary statistics"``.
    """
    return "Incremented summary statistics"


# using for testing functions
Expand All @@ -240,5 +260,5 @@ def increment_backtest_results(old_results,new_results):

# # print(pd.read_csv('predictions/non_trigger_stocks.csv'))
# incremented_table = increment_non_trigger_evals('10_day_ahead_close/stock_performance/2025-01-04/tickers','predictions/non_trigger_stocks.csv')
increment_backtest_results('predictions/backtest_results.csv')
# print(incremented_table)
# increment_results('predictions/backtest_results.csv')
print('Hi')
2 changes: 0 additions & 2 deletions predictions/utils/get_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,6 @@ def get_initial_df (ticker,start_date,end_date):
data = yf.download(ticker, start=start_date, end=end_date)
# Calculate leading technical indicators
data['ALMA'] = ta.alma(data['Close'])
stoch_rsi = ta.stochrsi(data['Close'])
# stoch_rsi = ta.rsi(data['Close'])

print('this is data close')
print(data['Close'])
Expand Down

0 comments on commit f5f52d7

Please sign in to comment.