Last active
September 29, 2015 11:55
-
-
Save schalekamp/adb1439af824a12f58a3 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
%matplotlib inline | |
import numpy as np | |
import matplotlib.pyplot as plt | |
import cvxopt as opt | |
from cvxopt import blas, solvers | |
import pandas as pd | |
import pandas.io.data as web | |
import datetime | |
# Turn off progress printing | |
solvers.options['show_progress'] = False | |
''' | |
# http://qoppac.blogspot.nl/2014/06/the-five-minute-portfolio.html | |
What I will actually do in my optimisation is assume that everything I have has the same net Sharpe ratio,
i.e. the returns scale precisely to the realised volatility. Of course this isn't quite the same as assuming | |
the same unknown gross return and subtracting known fees; but it is neater. I am also going to assume that we | |
have an annual risk threshold of 10%. This is about as risky as the FTSE 100 ETF returns over the last year | |
(I will relax that assumption at the end). | |
''' | |
def optimal_portfolio_fixed_sharpe(returns, tgt_vol=.1):
    '''Mean-variance optimisation under an equal-Sharpe assumption.

    Implied mean returns are backed out from each asset's historical daily
    volatility and the cross-sectional average annualised Sharpe ratio
    (i.e. every asset is assumed to earn the same Sharpe). The efficient
    frontier is traced with a family of QPs and the frontier portfolio
    whose risk is closest to ``tgt_vol`` is returned.

    returns -- 2-D array-like, one row of daily log returns per asset
    tgt_vol -- target portfolio volatility, same units as the frontier risks

    Returns (weights*100 as ndarray, frontier returns, frontier risks).
    Side effect: plots the frontier with matplotlib.
    '''
    n = len(returns)
    returns = np.asmatrix(returns)
    # Log-spaced risk-aversion parameters spanning 10**-1 .. 10**4.
    N = 100
    mus = [10**(5.0 * t/N - 1.0) for t in range(N)]
    # Convert to cvxopt matrices
    S = opt.matrix(np.cov(returns))
    # Annualised Sharpe per asset (16 ~ sqrt(252) scales daily to annual);
    # the cross-sectional mean enforces the "same Sharpe for all" assumption.
    sharpes = 16*np.mean(returns, axis=1)/np.std(returns, axis=1)
    sharpebar = sharpes.mean()
    # Implied daily mean return: daily vol * annual Sharpe / sqrt(252).
    pbar = opt.matrix(np.std(returns, axis=1)*sharpebar/np.sqrt(252))
    # Constraints: long-only (Gx <= h) and fully invested (Ax = b).
    G = -opt.matrix(np.eye(n))   # negative n x n identity matrix
    h = opt.matrix(0.0, (n, 1))
    A = opt.matrix(1.0, (1, n))
    b = opt.matrix(1.0)
    # Calculate efficient frontier weights using quadratic programming
    portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus]
    ## CALCULATE RISKS AND RETURNS FOR FRONTIER
    returns = [blas.dot(pbar, x) for x in portfolios]
    risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios]
    # Pick the frontier portfolio whose realised risk is closest to the target
    # (single min() pass instead of building and sorting a distance dict).
    by_risk = {np.sqrt(blas.dot(x, S*x)): x for x in portfolios}
    key = min(by_risk, key=lambda k: abs(k - tgt_vol))
    wt = by_risk[key]*100
    plt.plot(risks, returns, 'o', markersize=5)
    plt.xlabel('std')
    plt.ylabel('mean')
    return np.asarray(wt), returns, risks
''' | |
# http://blog.quantopian.com/markowitz-portfolio-optimization-2/ | |
''' | |
def optimal_portfolio(returns):
    '''Markowitz mean-variance optimisation via an efficient-frontier sweep.

    Solves a QP for each of a log-spaced family of risk-aversion levels to
    trace the frontier, fits a 2nd-degree polynomial to (return, risk), and
    then solves one final QP at the risk-aversion implied by the fit.

    returns -- 2-D array-like, one row of daily returns per asset

    Returns (optimal weights as ndarray, frontier returns, frontier risks).
    '''
    n = len(returns)
    returns = np.asmatrix(returns)
    # Log-spaced risk-aversion parameters, 10**-1 .. 10**4.
    N = 100
    mus = [10**(5.0 * t/N - 1.0) for t in range(N)]
    # Problem data in cvxopt form.
    S = opt.matrix(np.cov(returns))              # sample covariance
    pbar = opt.matrix(np.mean(returns, axis=1))  # sample mean returns
    # Long-only (Gx <= h) and fully-invested (Ax = b) constraints.
    G = -opt.matrix(np.eye(n))
    h = opt.matrix(0.0, (n, 1))
    A = opt.matrix(1.0, (1, n))
    b = opt.matrix(1.0)
    # One QP solve per risk-aversion level traces the efficient frontier.
    portfolios = []
    for mu in mus:
        portfolios.append(solvers.qp(mu*S, -pbar, G, h, A, b)['x'])
    ## CALCULATE RISKS AND RETURNS FOR FRONTIER
    returns = [blas.dot(pbar, w) for w in portfolios]
    risks = [np.sqrt(blas.dot(w, S*w)) for w in portfolios]
    ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE
    m1 = np.polyfit(returns, risks, 2)
    x1 = np.sqrt(m1[2] / m1[0])
    # CALCULATE THE OPTIMAL PORTFOLIO at the fitted risk-aversion level.
    wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']
    return np.asarray(wt), returns, risks
####### main
# Download ~17 months of daily prices for a set of London-listed ETFs and
# compare minimum-variance vs target-volatility vs bootstrapped weights.
start = datetime.date(2013,1,1)
end = datetime.date(2014,6,1)
symbols = ['VUKE.L','VAPX.L','VEUR.L','VUSA.L','IGLO.L','SEMB.L','HYLD.L','CORP.L']
vola_tgt = 10./100  # annual risk threshold of 10% (see the note at the top)
# get log returns from yahoo finance
# NOTE(review): pandas.io.data was removed from pandas; this needs the
# pandas-datareader package (or equivalent) on modern installs.
df = web.get_data_yahoo(symbols,start=start,end=end)
adj_close = df['Adj Close']
rets = np.log(adj_close.pct_change()+1).dropna(axis=0,how='any')
# One row per symbol, as the optimisers above expect.
retvec = rets.as_matrix().T
# min variance portfolio based on all returns
weights, returns, risks = optimal_portfolio(retvec)
print 'weights min variance optimization based on point estimate cov matrix'
print '\n'.join([ '%s : % 2.2f' % (s,weights[i][0]) for i,s in enumerate(rets.columns)])
# target portfolio based on all returns
print 'weights target volatility optimization based on point estimate cov matrix'
weights, returns, risks = optimal_portfolio_fixed_sharpe(retvec,vola_tgt)
print '\n'.join([ '%s : % 2.2f' % (s,weights[i][0]) for i,s in enumerate(rets.columns)])
# bootstrapping
# Re-optimise 100 times on small resampled (with replacement) subsets of the
# return history to gauge how stable the weights are.
x = []
for i in range(100):
    r=[]
    for s in rets.columns:
        # NOTE(review): 30/252 of the sample length — presumably meant as
        # "about 30 trading days' worth of data"; confirm the intent.
        sample_size = int(30/252.*len(retvec[0]))
        days = np.random.choice(range(len(retvec[0])),sample_size)
        a = np.array(rets[s])
        r.append( np.array( np.array([a[d] for d in days])) )
    w=np.vstack(r)
    weights, returns, risks = optimal_portfolio_fixed_sharpe(w,vola_tgt)
    x.append(weights)
# average portfolio weights from bootstrapping process
X = np.hstack(x)
X = np.asmatrix(X)
print 'weights target volatility optimization based on bootstrapping'
for i,c in enumerate(rets.columns):
    print '%s : % 2.2f' % (c,X[i,:].mean())
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment