Skip to content

Instantly share code, notes, and snippets.

View quantra-go-algo's full-sized avatar

Algorithmic Trading quantra-go-algo

View GitHub Profile
# NOTE(review): this gist excerpt has lost its indentation and is cut off
# mid-function — the `try` below has no `except` in view. Code lines are
# reproduced verbatim from the scrape; do not run as-is.
def optimal_d(DF, alpha=0.035, minimum = 0, maximum = 1):
# Purpose: find the fractional-differencing order d for DF by scanning a
# coarse d grid with d_estimates_db (defined below in this scrape) and then
# refining — the refinement step is lost in the truncation.
# Copy the dataframe so the caller's DF is not mutated
df = DF.copy()
# Estimate the best d over the coarse grid [minimum, maximum]
out = d_estimates_db(df,minimum,maximum)
# Persist the first-pass ADF summary for offline inspection
out.to_excel('out1.xlsx')
# A try-except block to handle errors while getting a d with better decimals
try:
# Last grid d whose ADF p-value is still above alpha (i.e. still
# non-stationary); the refinement presumably searched just past it —
# TODO confirm against the full gist.
d1 = out[out['pVal']>alpha].index.values[-1]
# Get d estimation summary
# NOTE(review): the line below looks like a separate usage snippet from the
# gist preview — it references `data`, which is undefined in this fragment.
out = d_estimates_db(data,0,1)
# NOTE(review): truncated gist preview with indentation stripped — the loop
# body is cut off after the log-price line. Code bytes kept verbatim.
def d_estimates_db(DF,minimum,maximum):
"""Build a summary table of ADF statistics over a grid of d values.

Columns suggest one ADF test per candidate d ('adfStat', 'pVal', 'lags',
'nObs', '95% conf', 'corr'); the per-d computation is lost in the
truncation — presumably statsmodels' adfuller on the fractionally
differenced log prices. TODO confirm against the full gist.
"""
# Create the (initially empty) output dataframe
out=pd.DataFrame(columns=['adfStat','pVal','lags',\
'nObs','95% conf','corr'])
# Copy the dataframe in order to not change the caller's DF
df0=DF.copy()
# Create a range of possible d values (11 evenly spaced points) and loop over them
for d in np.linspace(minimum,maximum,11):
# Work on the log of close prices — assumes DF has a 'Close' column
df1=np.log(df0['Close'])
# NOTE(review): truncated gist preview with indentation stripped — body ends
# right after computing the series length. Code bytes kept verbatim.
def fracDiff_FFD(series,d,thres=1e-5, w = None):
"""
Fractional differencing with a fixed-width (FFD) window.

Note 1: thres determines the cut-off weight for the window.
Note 2: d can be any positive fractional, not necessarily bounded [0,1].
Note 3 (review): `w` presumably allows reusing precomputed weights; the
code that uses it is lost in the truncation — confirm against full gist.
"""
# 1) Compute weights for the longest series
# Number of observations available in the input series
length = len(series)
# In case we don't have the weights, estimate them with the previous function
# Function to get the ARFIMA (fractional-differencing) weights
# NOTE(review): truncated gist preview with indentation stripped — cut off
# inside the `if`; `lim` and `ctr` are never used in the visible lines.
def getWeights_FFD(d, thres, lim):
# Start the weight sequence at 1.0 and the recursion index k at one
w, k = [1.], 1
ctr = 0
while True:
# Next weight via the ARFIMA recursion: w_k = -w_{k-1} * (d - k + 1) / k
w_ = -w[-1] / k * (d - k + 1)
# Stop once the new weight's magnitude falls below the threshold
if abs(w_) < thres:
# NOTE(review): the two lines below are from an unrelated snippet in the
# scrape (classification metrics); y_test/y_pred are undefined here.
# Get the classification report
print(classification_report(y_test, y_pred))
%%time
# NOTE(review): `%%time` is an IPython cell magic — these lines are notebook
# cells pasted into the scrape, not plain Python.
#Create an object of XGBClassifier with the below input parameters
# NOTE(review): `num_leaves` is a LightGBM parameter, not a standard
# XGBClassifier argument — XGBoost will likely warn/ignore it; verify intent.
model1 = XGBClassifier(booster='gbtree', num_leaves = 80, max_depth=5, learning_rate = 0.01, n_estimators=200)
# Fit the XGBoost model on the training split (x_train/y_train defined elsewhere)
model1.fit(x_train, y_train)
%%time
#Create an object of LGBMClassifier with the below input parameters
model = lgb.LGBMClassifier(num_leaves = 80, max_depth=5, learning_rate = 0.01, n_estimators=200)
# Fit the LightGBM model on the same training split
model.fit(x_train, y_train)
#Plot confusion matrix for training and testing data
from sklearn.metrics import confusion_matrix, precision_score, accuracy_score
from sklearn.metrics import ConfusionMatrixDisplay
# NOTE(review): the original called plot_confusion_matrix, which was
# deprecated in scikit-learn 1.0 and removed in 1.2;
# ConfusionMatrixDisplay.from_estimator is the documented replacement and
# produces the same plot. `model`, `x_*`, `y_*`, `plt` are defined elsewhere.
ConfusionMatrixDisplay.from_estimator(model, x_train, y_train)
plt.title("Confusion matrix of training data")
ConfusionMatrixDisplay.from_estimator(model, x_test, y_test)
plt.title("Confusion matrix of testing data")
#To obtain the training and testing accuracy (mean accuracy via .score)
print('Training accuracy: {0:0.2f}'.format(model.score(x_train,y_train)))
print('Testing accuracy: {0:0.2f}'.format(model.score(x_test,y_test)))