Skip to content

Instantly share code, notes, and snippets.

@shashankvemuri
shashankvemuri / imports.py
Last active January 4, 2024 05:28
Import these dependencies and set the variables
# Imports
# Dependencies for the stock screener: pandas_datareader fetches price data
# (routed through yfinance), ExcelWriter exports the results spreadsheet.
from pandas_datareader import data as pdr
from pandas import ExcelWriter
import yfinance as yf
import pandas as pd
import datetime
import time
import requests
# Route pandas_datareader's get_data_yahoo() calls through yfinance.
# NOTE(review): pdr_override() is deprecated/removed in recent yfinance
# releases — confirm the pinned yfinance version still provides it.
yf.pdr_override()
@shashankvemuri
shashankvemuri / rs_rating.py
Last active September 7, 2022 15:19
The beginning of the screener
# Index Returns
# Cumulative return of the benchmark index over the screening window,
# used later as the baseline for relative-strength comparison.
index_df = pdr.get_data_yahoo(index_name, start_date, end_date)
index_df['Percent Change'] = index_df['Adj Close'].pct_change()
# Use positional .iloc[-1] to take the last cumulative value: plain [-1] on a
# Series with a DatetimeIndex is label-based and raises KeyError in modern
# pandas (positional Series[...] indexing was deprecated and removed).
index_return = (index_df['Percent Change'] + 1).cumprod().iloc[-1]

# Find top 30% performing stocks (relative to the S&P 500)
for ticker in tickers:
    # Download historical data as CSV for each stock (makes the process faster)
    df = pdr.get_data_yahoo(ticker, start_date, end_date)
    df.to_csv(f'{ticker}.csv')
@shashankvemuri
shashankvemuri / try-except.py
Last active February 3, 2023 23:32
Minervini Conditions
# Checking Minervini conditions of top 30% of stocks in given list
# NOTE(review): this fragment is truncated — the `except` clause that closes
# the `try`, and the code that stores the computed values, are not visible here.
rs_stocks = rs_df['Ticker']
for stock in rs_stocks:
    try:
        # Re-load the per-ticker CSV written by the download step.
        df = pd.read_csv(f'{stock}.csv', index_col=0)
        # Simple-moving-average windows used by the Minervini trend template.
        sma = [50, 150, 200]
        for x in sma:
            df["SMA_"+str(x)] = round(df['Adj Close'].rolling(window=x).mean(), 2)
        # Storing required values
@shashankvemuri
shashankvemuri / end.py
Last active April 21, 2020 02:32
End of the stock screener
# Export the screener results to an Excel workbook.
print(exportList)
# Context manager replaces the explicit writer.save() call, which was
# deprecated and then removed in pandas 2.0 — the writer is flushed and
# closed on exit. sheet_name is passed by keyword because the positional
# form was likewise deprecated and removed.
with ExcelWriter("ScreenOutput.xlsx") as writer:
    exportList.to_excel(writer, sheet_name="Sheet1")
@shashankvemuri
shashankvemuri / code.py
Last active August 16, 2021 01:39
This gist contains all the code needed for the algorithm.
# Pull the S&P 500 ticker list and iterate it to collect analyst
# recommendations per ticker.
import requests
import pandas as pd
from yahoo_fin import stock_info as si
from pandas_datareader import DataReader
import numpy as np
# Full list of S&P 500 constituent tickers (scraped by yahoo_fin).
tickers = si.tickers_sp500()
recommendations = []
# NOTE(review): the loop body is not visible here — this fragment is truncated.
for ticker in tickers:
@shashankvemuri
shashankvemuri / screener.py
Last active January 4, 2024 05:28
The entire stock screener code
# Imports
# Full-screener variant of the dependency block: price data via
# pandas_datareader/yfinance, Excel export via ExcelWriter.
from pandas_datareader import data as pdr
from pandas import ExcelWriter
import yfinance as yf
import pandas as pd
import datetime
import time
import requests
# Route pandas_datareader's get_data_yahoo() calls through yfinance.
# NOTE(review): pdr_override() is deprecated/removed in recent yfinance
# releases — confirm the pinned yfinance version still provides it.
yf.pdr_override()
@shashankvemuri
shashankvemuri / intro.py
Last active May 25, 2020 03:17
Imports and parameters needed for the code
# Import libraries
# Scraping (urllib + BeautifulSoup) and VADER sentiment scoring for
# FinViz news headlines.
import pandas as pd
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
from urllib.request import urlopen
from urllib.request import Request
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Parameters
n = 3 #the # of article headlines displayed per ticker
@shashankvemuri
shashankvemuri / get_data.py
Last active May 25, 2020 03:35
Get data from FinViz
# Get Data
# Scrape each ticker's FinViz quote page and extract its news table.
# NOTE(review): fragment appears truncated — the line that stores
# news_table into news_tables (e.g. news_tables[ticker] = news_table)
# is not visible here.
finviz_url = 'https://finviz.com/quote.ashx?t='
news_tables = {}
for ticker in tickers:
    url = finviz_url + ticker
    # A User-Agent header is set because FinViz rejects bare urllib requests.
    req = Request(url=url,headers={'user-agent': 'my-app/0.0.1'})
    resp = urlopen(req)
    html = BeautifulSoup(resp, features="lxml")
    # The headlines live in the table with id="news-table".
    news_table = html.find(id='news-table')
@shashankvemuri
shashankvemuri / iterate_news.py
Created May 25, 2020 03:18
Iterate through the news
# Iterate through the news
# Walk every row of every ticker's news table, pulling the headline text
# and the date/time cell.
# NOTE(review): fragment is truncated — the else branch (rows that carry
# both date and time) and the parsed_news.append(...) are not visible here.
parsed_news = []
for file_name, news_table in news_tables.items():
    for x in news_table.findAll('tr'):
        # Headline text is inside the row's <a> tag.
        text = x.a.get_text()
        # The <td> holds either "time" alone or "date time".
        date_scrape = x.td.text.split()
        if len(date_scrape) == 1:
            # Only a time present: the row belongs to the previous row's date.
            # NOTE(review): this rebinds the name `time`, shadowing the stdlib
            # time module if it was imported earlier in the script.
            time = date_scrape[0]
@shashankvemuri
shashankvemuri / sentiment_analysis.py
Created May 25, 2020 03:19
Perform sentiment analysis
# Sentiment Analysis
# Score every headline with VADER and append its polarity columns to the
# news DataFrame.
analyzer = SentimentIntensityAnalyzer()
columns = ['Ticker', 'Date', 'Time', 'Headline']
news = pd.DataFrame(parsed_news, columns=columns)
# One polarity-score dict per headline, collected with a comprehension.
scores = [analyzer.polarity_scores(headline) for headline in news['Headline']]
df_scores = pd.DataFrame(scores)
# The suffix keeps any colliding column names from the score frame distinct.
news = news.join(df_scores, rsuffix='_right')