@KobaKhit
Last active November 2, 2024 18:36
Example of using the StubHub Inventory Search v2 API to download all listings for a given event ID.
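The main() function at the bottom of the script reads event ids from a file named sixers-events.csv with an Eventid column. A minimal sketch of building such an input file (the file name and column name come from the script below; the event ids are placeholders, not real StubHub event ids):

# Hypothetical helper for creating the input file expected by main().
import pandas as pd

# Placeholder event ids - replace with real StubHub event ids.
pd.DataFrame({'Eventid': [100000001, 100000002]}).to_csv('sixers-events.csv', index=False)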
import requests
import base64
import pprint
import pandas as pd
import json
from tqdm import tqdm
# https://stubhubapi.zendesk.com/hc/en-us/articles/220922687-Inventory-Search
class St(object):
    '''Class for interfacing with StubHub's Inventory Search API.'''

    def __init__(self, app_token, consumer_key, consumer_secret, stubhub_username, stubhub_password):
        # Authorization: exchange the consumer key/secret and StubHub login for an OAuth access token.
        combo = consumer_key + ':' + consumer_secret
        basic_authorization_token = base64.b64encode(combo.encode('utf-8')).decode()
        url = 'https://api.stubhub.com/login'
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Authorization': 'Basic ' + basic_authorization_token}
        body = {
            'grant_type': 'password',
            'username': stubhub_username,
            'password': stubhub_password,
            'scope': 'PRODUCTION'}
        r = requests.post(url, headers=headers, data=body)  # request token
        token_response = r.json()
        access_token = token_response['access_token']
        self.user_GUID = r.headers['X-StubHub-User-GUID']  # user GUID returned by the login endpoint
        self.headers = {
            'Authorization': 'Bearer ' + access_token,
            'Accept': 'application/json',
            'Accept-Encoding': 'application/json'}
    def process_listings(self, inv):
        # Process StubHub's API response into a flat list of dicts, one per listing.
        listings = inv['listing']
        fields = ['listingId', 'sectionId', 'row', 'quantity', 'sellerSectionName', 'sectionName',
                  'zoneId', 'zoneName', 'dirtyTicketInd', 'score']
        rows = []
        for k in listings:
            ret = {}
            for f in fields:
                ret[f] = k.get(f, 'NA')  # missing fields default to 'NA'
            ret['currentPrice'] = k['currentPrice']['amount']
            ret['listingPrice'] = k['listingPrice']['amount']
            ret['seatNumbers'] = k['seatNumbers'].replace(',', ';') if 'seatNumbers' in k else 'NA'
            rows.append(ret)
        return rows
    def get_listings(self, eventid, pages=False):
        # Get all current listings for an event, up to 2000 listings.
        # Beyond 2000 you might hit the "2000 requests every ten minutes" API limit
        # (see the throttling sketch below the script).
        inventory_url = 'https://api.stubhub.com/search/inventory/v2'
        data = {'eventid': eventid, 'rows': 200, 'start': 0}
        inventory = requests.get(inventory_url, headers=self.headers, params=data).json()
        total_listings = inventory['totalListings']
        if pages:
            # Page through the remaining listings 200 rows at a time.
            start = 200
            while start < total_listings:
                data = {'eventid': eventid, 'rows': 200, 'start': start}
                inv_temp = requests.get(inventory_url, headers=self.headers, params=data).json()
                inventory['listing'].extend(inv_temp['listing'])
                start += 200
            inventory['rows'] = total_listings
        del inventory['minQuantity']
        del inventory['maxQuantity']
        inv = self.process_listings(inventory)
        return inv
def main():
    ## Enter user's API key, secret, and StubHub login
    app_token = ''
    consumer_key = ''
    consumer_secret = ''
    stubhub_username = ''
    stubhub_password = ''
    st = St(app_token, consumer_key, consumer_secret, stubhub_username, stubhub_password)

    # Event ids
    events = pd.read_csv('sixers-events.csv')

    # Get listings for event ids
    for eid in tqdm(events['Eventid']):
        inv = st.get_listings(eventid=eid, pages=True)  # api request
        df = pd.DataFrame(inv)  # turn into a dataframe
        df['Event'] = eid  # add event id column
        df.to_csv('data\\current_inventory_{}.csv'.format(eid), index=False)  # save to file

if __name__ == '__main__':
    main()
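The comment in get_listings mentions StubHub's "2000 requests every ten minutes" limit. A minimal throttling sketch, assuming a fixed delay between requests is acceptable: 600 s / 2000 requests is 0.3 s minimum spacing, so the 0.35-second interval below (an assumption, not from the gist) keeps a long run of paginated calls under the limit. throttled_get is a hypothetical wrapper, not part of the script above.

# Hypothetical wrapper: requests.get with a fixed minimum delay between calls.
import time
import requests

REQUEST_INTERVAL = 0.35  # seconds; 600 s / 2000 requests = 0.3 s minimum spacing
_last_request_time = 0.0

def throttled_get(url, **kwargs):
    """Sleep just enough so consecutive calls are at least REQUEST_INTERVAL apart."""
    global _last_request_time
    wait = REQUEST_INTERVAL - (time.monotonic() - _last_request_time)
    if wait > 0:
        time.sleep(wait)
    _last_request_time = time.monotonic()
    return requests.get(url, **kwargs)

# Usage inside get_listings, replacing the direct requests.get calls:
# inv_temp = throttled_get(inventory_url, headers=self.headers, params=data).json()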