Kelly Sizing Optimizer for Monte Carlo Samples in PyTorch
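For context, here is my own summary (not text from the gist) of the objective the code below minimizes: the negative sample-average log growth of wealth, plus a one-sided Lagrange penalty that keeps the total allocation at or below one.

\[
L(c, \lambda) \;=\; -\frac{1}{N}\sum_{i=1}^{N} \log\!\Big(1 + \sum_{k} c_k\, r_{k,i}\Big) \;+\; \lambda\,\max\!\Big(0,\ \sum_{k} c_k - 1\Big)
\]

where \(c_k\) are the asset weights, \(r_{k,i}\) the Monte Carlo return samples, \(N\) the number of samples, and \(\lambda\) the Lagrange multiplier.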
import torch as tch


def kelly_loss(c_assets, r_assets, spreads, spread_loss):
    """
    Negative of the estimated exponential rate of wealth growth;
    minimizing this maximizes the Kelly growth rate.
    NOTE: the samples must be in the same order; the assumption is
    that the returns are realized in the same way.
    Args:
        c_assets: (nasset) tensor of coefficients (normalized)
        r_assets: (nasset X nsamples) tensor of returns.
        spreads, spread_loss: intended to discount returns by transaction
            costs; not applied in this version.
    """
    nsamples = r_assets.shape[1]
    # log(1 + c . r) for each Monte Carlo sample.
    log_cr_p1 = tch.log(tch.einsum('k,ki->i', c_assets, r_assets) + 1.)
    return (-1. / nsamples) * tch.sum(log_cr_p1)


def lagrange_term(c_assets, multiplier):
    # One-sided penalty: active only when the total allocation exceeds one.
    return tch.flatten((tch.sum(c_assets) - 1.).relu_().unsqueeze(0) * multiplier)


def OptimizeSizing(return_samples, under_samples, spreads,
                   spread_loss=.7, npos=4, no_short=True):
    """
    The sizing optimizer builds a portfolio of assets from samples of the
    return distribution and a few commonsense constraints, using a
    Lagrangian to keep the sum of investments and cash at one.
    NOTE: the samples must be in the same order; the assumption is
    that the returns are realized in the same way.
    Args:
        return_samples: (nasset X nsamples) array
        under_samples: (nsamples) array of the underlying (currently unused)
        spreads: spreads on each asset (percent) to discount returns.
        npos: currently unused.
        no_short: if True, clamp weights to [0, 1] after each step.
    """
    nasset = return_samples.shape[0]
    # The coefficients to be optimized, plus the Lagrange multiplier.
    c_asset = tch.nn.Parameter(tch.ones(nasset).double() * ((1e-2) / nasset),
                               requires_grad=True)
    multiplier = tch.nn.Parameter(tch.tensor([1.]).double(), requires_grad=True)
    optimizer = tch.optim.Adam([c_asset, multiplier], lr=0.002,
                               weight_decay=0.0, amsgrad=False)
    loss_value = tch.tensor([10.], requires_grad=True).double()
    last_loss = tch.tensor([20.], requires_grad=False).double()
    iter = 0
    maxiter = 1500
    while iter < maxiter and tch.abs(loss_value - last_loss).item() > 1e-14:
        last_loss = loss_value.detach()
        loss_value = (kelly_loss(c_asset, return_samples, spreads, spread_loss)
                      + lagrange_term(c_asset, multiplier))
        optimizer.zero_grad()
        loss_value.backward()
        optimizer.step()
        if no_short:
            with tch.no_grad():
                # Project out short positions and cap each weight at one.
                c_asset.clamp_(0., 1.)
        print("Iter:", iter, "Loss:", loss_value.item(),
              " dloss ", tch.abs(loss_value - last_loss).item())
        iter += 1
    return c_asset.detach()
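A minimal usage sketch follows (not part of the original gist): the three-asset Gaussian return model, the numbers, and the variable names are hypothetical, chosen only to show the calling convention. It assumes `import torch as tch` as above; `under_samples` and `spreads` are passed through but not used by this version of the loss.

# Hypothetical example: size three assets from synthetic Monte Carlo return samples.
tch.manual_seed(0)
nasset, nsamples = 3, 20000

# Made-up per-period return distributions (mean, std) for each asset.
means = tch.tensor([0.010, 0.005, 0.002]).double()
stds = tch.tensor([0.15, 0.08, 0.04]).double()
return_samples = means[:, None] + stds[:, None] * tch.randn(nasset, nsamples).double()

# Placeholders: required by the signature, but unused by this version of the loss.
under_samples = tch.zeros(nsamples).double()
spreads = tch.zeros(nasset).double()

weights = OptimizeSizing(return_samples, under_samples, spreads, no_short=True)
print("Kelly weights:", weights)
print("Cash fraction:", 1. - weights.sum().item())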