-
-
Save kevinbsc/2f3996fef349f3f569bcd89c106677bd to your computer and use it in GitHub Desktop.
Get a Twitter user's photo URLs from their tweets — download all images posted by a Twitter user
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python | |
# encoding: utf-8 | |
import tweepy #https://github.com/tweepy/tweepy | |
import csv | |
import sys | |
#Twitter API credentials | |
consumer_key = "" | |
consumer_secret = "" | |
access_key = "" | |
access_secret = "" | |
def get_all_tweets(screen_name):
    """Download a user's recent tweets and save the ones with images to CSV.

    Twitter only exposes roughly the most recent 3240 tweets through the
    user_timeline endpoint. This pages backwards through them 200 at a
    time, keeps only tweets carrying a media entity, and writes
    id / created_at / text / media_url rows to '<screen_name>_tweets.csv'.

    Parameters:
        screen_name: Twitter handle of the account to download from.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # all tweepy Tweet objects collected so far
    alltweets = []

    # initial request for the most recent tweets
    # (200 is the maximum allowed count — the original asked for only 1,
    # which forced an extra round trip per run)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # an account with no tweets would make alltweets[-1] raise IndexError
    if not alltweets:
        print("no tweets found for %s" % screen_name)
        return

    # id of the oldest tweet we have, less one, for max_id paging
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print("getting tweets before %s" % oldest)
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200,
                                       max_id=oldest)
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print("...%s tweets downloaded so far" % len(alltweets))

    # go through all found tweets and keep only the ones with images
    outtweets = []
    for tweet in alltweets:
        try:
            # KeyError if 'media' is absent, IndexError if the list is
            # empty — NameError (as in the original) can never occur here
            media_url = tweet.entities['media'][0]['media_url']
        except (KeyError, IndexError):
            # no media on this tweet, so skip it
            continue
        print(media_url)
        outtweets.append([tweet.id_str, tweet.created_at, tweet.text,
                          media_url])

    # write the csv — text mode with newline='' is required by the csv
    # module on Python 3 (the original's 'wb' would crash), and utf-8
    # encoding lets us write tweet text as str without .encode()
    with open('%s_tweets.csv' % screen_name, 'w', newline='',
              encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text", "media_url"])
        writer.writerows(outtweets)
if __name__ == '__main__':
    # Account whose images we want to download; substitute any screen name.
    target_account = "WansteadWomble"
    get_all_tweets(target_account)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment