Wealth Hub 18182324

18182324 / Backtesting MA in R.r
Last active May 7, 2022 07:13
Backtesting MA in R
# Step 1: Load libraries and data
library(quantmod)
library(PerformanceAnalytics)
getSymbols('NFCI', src = 'FRED', from = '2000-01-01')
##[1] "NFCI"
NFCI <- na.omit(lag(NFCI)) # we can only act on the signal after release, i.e. the next day
import alpaca_trade_api as tradeapi
from ta.trend import MACD
import pandas as pd
import numpy as np
import time
import datetime as dt
import logging
# ============ CONFIGURATION ============
# Replace with your actual Alpaca API credentials
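The preview ends at the configuration banner. A minimal sketch of how the configuration and a MACD signal check could continue, assuming Alpaca's paper-trading endpoint and the `ta` library's default MACD windows; the placeholder keys, SYMBOL, and latest_macd_signal are illustrative, not taken from the gist:

API_KEY = "YOUR_API_KEY"            # placeholder credentials
API_SECRET = "YOUR_API_SECRET"
BASE_URL = "https://paper-api.alpaca.markets"   # paper-trading endpoint
SYMBOL = "SPY"                      # illustrative symbol

api = tradeapi.REST(API_KEY, API_SECRET, BASE_URL)

def latest_macd_signal(symbol):
    """Return 'buy' or 'sell' from the most recent MACD histogram value."""
    bars = api.get_bars(symbol, "1Day", limit=100).df
    hist = MACD(close=bars["close"]).macd_diff()    # MACD line minus signal line
    return "buy" if hist.iloc[-1] > 0 else "sell"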
@18182324
18182324 / pairs-trading-advanced-strategy
Created November 14, 2021 10:01
Pairs Trading Advanced K-Means Algorithm Clustering
companies_dict = {
    'Amazon': 'AMZN',
    'Apple': 'AAPL',
    'Walgreen': 'WBA',
    'Northrop Grumman': 'NOC',
    'Boeing': 'BA',
    'Lockheed Martin': 'LMT',
    'McDonalds': 'MCD',
    'Intel': 'INTC',
    'Navistar': 'NAV',
}
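The gist title advertises K-Means clustering of these tickers, but the preview stops at the dictionary. A minimal sketch of the clustering step, assuming a movements array (one row per company, normalised daily price changes as columns) has been built elsewhere; the cluster count and the movements name are assumptions:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer

tickers = list(companies_dict.values())

# Normalise each company's series, then cluster into (here) 5 groups
pipeline = make_pipeline(Normalizer(), KMeans(n_clusters=5, random_state=42))
pipeline.fit(movements)
labels = pipeline.named_steps['kmeans'].labels_

# Companies sharing a cluster are candidate pairs for the pairs trade
for cluster in np.unique(labels):
    print(cluster, [t for t, lab in zip(tickers, labels) if lab == cluster])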
from datetime import timedelta

class IronCondorAlgorithm(QCAlgorithm):
    def Initialize(self):
        self.SetStartDate(2021, 2, 1)
        self.SetEndDate(2021, 8, 1)
        self.SetCash(30000)
        equity = self.AddEquity("TSLA", Resolution.Minute)
        option = self.AddOption("TSLA", Resolution.Minute)
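The preview stops inside Initialize. One plausible continuation under QuantConnect's LEAN API, filtering the option chain to near-the-money strikes and near-term expiries for the condor legs; the filter bounds below are assumptions, not values from the gist:

        # assumed continuation of Initialize -- filter bounds are illustrative
        self.symbol = option.Symbol
        option.SetFilter(-10, 10, timedelta(0), timedelta(30))
        self.SetBenchmark(equity.Symbol)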
import matplotlib.pyplot as plt
import pandas as pd
from johansen import coint_johansen  # local johansen.py module

# Load the two price series (replace "yourfile.csv" with each instrument's file)
df_x = pd.read_csv("yourfile.csv", index_col=0)
df_y = pd.read_csv("yourfile.csv", index_col=0)

# Johansen cointegration test on the pair of Close prices
df = pd.DataFrame({'x': df_x['Close'], 'y': df_y['Close']})
coint_johansen(df, 0, 1)
import numpy as np
import pandas as pd
import statsmodels.api as sm

# Annual income (Y) and consumption (C) series from the usyc87 course dataset
data = pd.read_csv("http://web.pdx.edu/~crkl/ceR/data/usyc87.txt", index_col='YEAR', sep=r'\s+', nrows=66)
y = data['Y']
c = data['C']

from statsmodels.tsa.vector_ar.vecm import coint_johansen
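A short sketch of how the Johansen test could then be run on the income/consumption pair and how the output is read; the result variable and printed interpretation are illustrative, but lr1, cvt, and evec are the attribute names of statsmodels' Johansen result object:

# det_order=0 adds a constant term, k_ar_diff=1 uses one lagged difference
result = coint_johansen(pd.DataFrame({'y': y, 'c': c}), 0, 1)

print(result.lr1)          # trace statistics for rank r = 0, 1, ...
print(result.cvt)          # 90/95/99% critical values for the trace test
print(result.evec[:, 0])   # first eigenvector = estimated cointegrating vector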
# Posterior predictive probability of y = 1 and its complement
py1_post = np.sum(res["theta"] * res["post"])
py0_post = 1.0 - py1_post
py0_post, py1_post

# The same predictive probability under the prior
py1_prior = np.sum(res["theta"] * res["prior"])
py0_prior = 1 - py1_prior
py0_prior, py1_prior
import matplotlib.pyplot as plt
import tensorflow_probability as tfp

# Plot posterior and prior over a grid of candidate thetas
plt.figure(figsize=(16, 12))
theta = np.arange(0.05, 1, 0.05)
print(theta)
prior = 1 / len(theta)  # flat prior: equal mass on each of the 19 candidate thetas

# Evaluate joint likelihood and unnormalized posterior at one specific theta = 0.5
dist = tfp.distributions.Bernoulli(probs=0.5)    # one specific theta
print(np.prod(dist.prob(obs_data)))              # joint likelihood of obs_data (defined earlier)
print(np.prod(dist.prob(obs_data)) * prior)      # unnormalized posterior
# Repeat the process for all thetas, range 0.05 - 0.95
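A sketch of the "repeat for all thetas" step announced by the comment above: evaluate the joint likelihood over the whole theta grid, normalise, and plot prior against posterior (obs_data, theta, and prior as defined above):

likelihood = np.array(
    [np.prod(tfp.distributions.Bernoulli(probs=t).prob(obs_data)) for t in theta]
)
unnorm_post = likelihood * prior
post = unnorm_post / np.sum(unnorm_post)    # normalise so the posterior sums to 1

plt.plot(theta, np.full_like(theta, prior), 'o-', label='prior')
plt.plot(theta, post, 'o-', label='posterior')
plt.xlabel('theta')
plt.legend()
plt.show()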
try:  # If running in colab
    import google.colab
    IN_COLAB = True
    %tensorflow_version 2.x
except:
    IN_COLAB = False

import tensorflow as tf
if not tf.__version__.startswith('2'):  # Checking if TF 2.x is installed
    print('Please install tensorflow 2.0 to run this notebook')
# Reordering so x values are in increasing order
order_idx_train = np.squeeze(x_train.argsort(axis=0))
x_train = x_train[order_idx_train]
y_train = y_train[order_idx_train]

order_idx_val = np.squeeze(x_val.argsort(axis=0))
x_val = x_val[order_idx_val]
y_val = y_val[order_idx_val]

order_idx_test = np.squeeze(x_test.argsort(axis=0))
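The preview cuts off after the test-set index is computed; presumably it is applied the same way as the train and validation splits:

# assumed continuation, mirroring the train/val reordering above
x_test = x_test[order_idx_test]
y_test = y_test[order_idx_test]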