Last active
September 9, 2020 21:01
-
-
Save shashankvemuri/048060f056e36026ce996bb637b87bdf to your computer and use it in GitHub Desktop.
Create the function to get all the data
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def getData(list_of_stocks):
    """For each ticker, compute a small stock report and send it via sendMessage().

    Per ticker the report contains:
      * current live price (yahoo_fin)
      * annualized Sharpe ratio of a 100%-in-this-stock portfolio
      * mean VADER sentiment of the ticker's FinViz news headlines
      * beta versus the S&P 500 (^GSPC) from monthly returns
      * latest 14-period RSI (TA-Lib default)

    Parameters
    ----------
    list_of_stocks : iterable of str
        Ticker symbols understood by Yahoo Finance and FinViz.

    Relies on module-level `start`/`end` dates and the `sendMessage` helper
    defined elsewhere in this file.
    """
    for stock in list_of_stocks:
        df = DataReader(stock, 'yahoo', start, end)
        print(stock)

        # --- Current price ---
        price = round(si.get_live_price(stock), 2)

        # --- Sharpe ratio ---
        # Single-stock "portfolio": the allocation is necessarily 100%,
        # so the original x/y == 1.0 dance is dropped.
        x = 5000  # notional dollars invested
        stock_df = df
        stock_df['Norm return'] = stock_df['Adj Close'] / stock_df.iloc[0]['Adj Close']
        stock_df['Allocation'] = stock_df['Norm return']  # 100% allocated to this stock
        stock_df['Position'] = stock_df['Allocation'] * x
        val = pd.concat([stock_df['Position']], axis=1)
        # FIX: column label was hard-coded to 'WMT Pos' for every ticker.
        val.columns = ['{} Pos'.format(stock)]
        val['Total Pos'] = val.sum(axis=1)
        val['Daily Return'] = val['Total Pos'].pct_change(1)
        sharpe_ratio = val['Daily Return'].mean() / val['Daily Return'].std()
        # Annualize assuming 252 trading days.
        A_Sharpe_Ratio = round((252 ** 0.5) * sharpe_ratio, 2)

        # --- News sentiment (FinViz headlines scored with VADER) ---
        finwiz_url = 'https://finviz.com/quote.ashx?t='
        url = finwiz_url + stock
        req = Request(url=url, headers={'user-agent': 'my-app/0.0.1'})
        response = urlopen(req)
        html = BeautifulSoup(response, features="lxml")
        news_table = html.find(id='news-table')

        parsed_news = []
        date = None  # FIX: guard against an UnboundLocalError if the first row has only a time
        for row in news_table.findAll('tr'):
            text = row.a.get_text()
            date_scrape = row.td.text.split()
            # FinViz prints the date only on the first headline of each day;
            # later rows of the same day carry just the time.
            if len(date_scrape) == 1:
                time = date_scrape[0]
            else:
                date = date_scrape[0]
                time = date_scrape[1]
            parsed_news.append([stock, date, time, text])

        vader = SentimentIntensityAnalyzer()
        columns = ['ticker', 'date', 'time', 'headline']
        dataframe = pd.DataFrame(parsed_news, columns=columns)
        scores = dataframe['headline'].apply(vader.polarity_scores).tolist()
        scores_df = pd.DataFrame(scores)
        dataframe = dataframe.join(scores_df, rsuffix='_right')
        dataframe['date'] = pd.to_datetime(dataframe.date).dt.date
        dataframe = dataframe.set_index('ticker')
        sentiment = round(dataframe['compound'].mean(), 2)

        # --- Beta vs. S&P 500 from monthly returns ---
        # Reuse the already-downloaded `df` instead of re-fetching the ticker;
        # the extra columns added above do not affect 'Adj Close'.
        dfb = DataReader('^GSPC', 'yahoo', start, end)
        rts = df.resample('M').last()
        rbts = dfb.resample('M').last()
        dfsm = pd.DataFrame({'s_adjclose': rts['Adj Close'],
                             'b_adjclose': rbts['Adj Close']},
                            index=rts.index)
        dfsm[['s_returns', 'b_returns']] = (
            dfsm[['s_adjclose', 'b_adjclose']]
            / dfsm[['s_adjclose', 'b_adjclose']].shift(1) - 1)
        dfsm = dfsm.dropna()
        covmat = np.cov(dfsm["s_returns"], dfsm["b_returns"])
        # beta = Cov(stock, market) / Var(market)
        beta = round(covmat[0, 1] / covmat[1, 1], 2)

        # --- Relative Strength Index (TA-Lib, 14-period default) ---
        df["rsi"] = talib.RSI(df["Close"])
        values = df["rsi"].tolist()[-1]
        # FIX: original rounded an undefined name `value` (NameError).
        rsi = round(values, 2)

        output = ("\nTicker: " + str(stock) + "\nCurrent Price : " + str(price) + "\nSharpe Ratio: " + str(A_Sharpe_Ratio) + "\nNews Sentiment: " + str(sentiment) + "\nRelative Strength Index: " + str(rsi) + "\nBeta Value for 1 Year: " + str(beta))
        sendMessage(output)
if __name__ == '__main__':
    # Entry point: run the screener over the module-level ticker list.
    getData(stock_list)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment