This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
def sns_heatmap(dataframe, key_column, key_column_format, chart_title, c_pos_neg):
    # Lay out the dataframe's rows on a 10x10 grid for a seaborn heatmap:
    # 'Position' is the 1-based row rank, and (x, y) are the 1-based
    # column/row coordinates of that position in the grid.
    # NOTE(review): the reshape((10,10)) below requires exactly 100 rows —
    # confirm callers always pass a 100-row dataframe.
    dataframe['Position'] = range(1,len(dataframe) + 1)
    # Row index: positions 1-10 -> y=1, 11-20 -> y=2, ...; exact multiples
    # of 10 belong to the previous row, hence the modulo special case.
    dataframe['y'] = [(x//10 + 1 if x%10 != 0 else (x//10)) for x in dataframe['Position']]
    # Column index: 1..10 within each row; x%10 == 0 maps to column 10.
    dataframe['x'] = [(x%10 if x%10 != 0 else 10) for x in dataframe['Position']]
    # most_bought['normalized_dollars_bought_sold'] = (most_bought['dollars_bought_sold'] - most_bought['dollars_bought_sold'].min()) / (most_bought['dollars_bought_sold'].max()-most_bought['dollars_bought_sold'].min())
    # most_bought['natural_log'] = np.log(most_bought['normalized_dollars_bought_sold'])
    # 10x10 matrix of key_column values: cell intensity for the heatmap.
    pivot_df = dataframe.pivot(index='y', columns='x', values=key_column)
    # Matching 10x10 matrix of ticker symbols used to annotate the cells.
    ticker_labels = np.asarray(dataframe['Ticker']).reshape((10,10))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Functions to support pandas lambda function ---
def calc_pct_bought_sold(this_share_count, last_share_count):
    '''
    Return the percent change in share count between two quarters.

    A position opened from nothing has no well-defined percent change, so
    it is capped at +100; no position in either quarter returns 0.
    '''
    if last_share_count == 0:
        # Guard the whole zero-denominator case. The original only guarded
        # this_share_count > 0, so a newly opened short (negative count
        # from zero) raised ZeroDivisionError.
        return 100 if this_share_count != 0 else 0
    # Standard percent change, e.g. 150 vs 100 -> +50, 50 vs 100 -> -50.
    return ((this_share_count / last_share_count) - 1) * 100
def calc_dollar_bought_sold(count_bought_sold, last_holding_val, last_share_count, this_holding_val, this_share_count): |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Map tickers into the quarterly holding dataframes ---
# Build the CUSIP -> ticker lookup once instead of converting the mapping
# pairs to a dict separately for each dataframe.
ticker_lookup = dict(cusip_mappings)
# Map each holding's CUSIP to its ticker; CUSIPs with no known ticker
# become 'N/A' (same result as the previous separate fillna step).
all_this_qtr_holdings['Ticker'] = all_this_qtr_holdings['CUSIP'].map(ticker_lookup).fillna('N/A')
all_last_qtr_holdings['Ticker'] = all_last_qtr_holdings['CUSIP'].map(ticker_lookup).fillna('N/A')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Functions for mapping CUSIPs to Tickers ---
def write_cusips_to_txt(cusip_mappings, cusip_mapping_file = 'cusips.txt'):
    '''
    Persist the CUSIP mappings to a text file as their string
    representation, so later runs can reload them instead of making
    fresh API calls.
    '''
    serialized = str(cusip_mappings)
    with open(cusip_mapping_file, 'w') as outfile:
        outfile.write(serialized)
def read_cusips_from_txt(cusip_mapping_file='cusips.txt'): |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# --- Function for running quick statistics on the dataframe ---
def dataframe_statistics(df, manager_name:str, cik:str):
    '''
    Annotate a holdings dataframe with basic portfolio statistics.

    Adds three columns in place — each holding's share of total portfolio
    value (as a percentage), plus the manager's name and CIK — and
    returns the same dataframe.
    '''
    total_value = df['Holding value'].sum()
    df['Portfolio percentage'] = df['Holding value'] / total_value * 100
    df['Manager Name'] = manager_name
    df['CIK'] = cik
    return df
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
[{"name":"PAULSON & CO. INC.", "cik":"0001035674"}, | |
{"name":"BAUPOST GROUP LLC", "cik":"0001061768"}, | |
{"name":"BERKSHIRE HATHAWAY INC", "cik":"0001067983"}, | |
{"name":"GREENLIGHT CAPITAL INC", "cik":"0001079114"}, | |
{"name":"LONE PINE CAPITAL LLC", "cik":"0001061165"}, | |
{"name":"SOROS FUND MANAGEMENT LLC", "cik":"0001029160"}, | |
{"name":"APPALOOSA LP", "cik":"0001656456"}, | |
{"name":"GOLDMAN SACHS GROUP INC", "cik":"0000886982"}, | |
{"name":"SCION ASSET MANAGEMENT, LLC", "cik":"0001649339"}, | |
{"name":"BRIDGEWATER ASSOCIATES, LP", "cik":"0001350694"}, |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#--- Import the Required Libraries --- | |
import requests | |
import json | |
import pandas as pd | |
import open_figi_key | |
import time | |
import seaborn as sns | |
import numpy as np | |
import matplotlib.pyplot as plt |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
###############################################################################
# 6. Remove Weekday Exceptions and Aggregate the Results                      #
###############################################################################
# Remove Weekday Exceptions.
# Use an identity check: 'is not None' is the idiomatic test — '!= None'
# dispatches to __eq__, which pandas objects overload elementwise.
if weekday_exceptions is not None:
    df = df[~df.weekday.isin(weekday_exceptions)]
# Aggregate the Results: per group, the number of GPS fixes ('timestamp'
# count) and the mean distance from the point of interest.
df = df.groupby(aggregate_list).agg({'timestamp':['count'],'distance':['mean']})
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
###############################################################################
# 5. Setup Useful Columns                                                     #
###############################################################################
# Distance of each GPS coordinate from the point of interest, in km
# (geopy geodesic distance). A comprehension replaces the manual
# append loop; 'lon' also avoids the poor name 'long'.
df['distance'] = [
    distance.geodesic(point_of_interest, (lat, lon)).km
    for lat, lon in zip(df['latitude'], df['longitude'])
]
# Keep only the rows recorded within the radius of interest.
df = df[df['distance'] < distance_from_point_of_interest]
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
###############################################################################
# 4. Unpack the Data                                                          #
###############################################################################
# Flatten the raw location records into rows. Coordinates are stored as
# integers scaled by 1e7 ('E7' fields), so divide to get decimal degrees.
# 'timestampMs' looks like an epoch timestamp in milliseconds —
# NOTE(review): start_datetime/end_datetime must then be in the same
# millisecond-epoch units; confirm where they are constructed.
df_rows = []
for key in locations:
    timestamp = float(key['timestampMs'])
    latitude = float(key['latitudeE7'])/10000000
    longitude = float(key['longitudeE7'])/10000000
    # Keep only fixes inside the requested time window (inclusive bounds).
    if timestamp >= start_datetime and timestamp <= end_datetime:
NewerOlder