#!/usr/bin/env python
# encoding: utf-8

import tweepy  # https://github.com/tweepy/tweepy
import csv
import sys

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""

def get_all_tweets(screen_name):
    # Twitter only allows access to a user's most recent 3240 tweets with this method

    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=1)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print "getting tweets before %s" % (oldest)

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print "...%s tweets downloaded so far" % (len(alltweets))

    # go through all found tweets and drop the ones with no images
    outtweets = []  # initialize master list to hold our ready tweets
    for tweet in alltweets:
        # not all tweets will have a media url, so let's skip those
        # (catch AttributeError too: a Status object may lack 'entities')
        try:
            print tweet.entities['media'][0]['media_url']
        except (AttributeError, KeyError):
            # we don't want any entries without the media_url, so do nothing
            pass
        else:
            # got media_url, so add the tweet to the output
            outtweets.append([tweet.id_str, tweet.created_at, tweet.text.encode("utf-8"), tweet.entities['media'][0]['media_url']])

    # write the csv
    with open('%s_tweets.csv' % screen_name, 'wb') as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text", "media_url"])
        writer.writerows(outtweets)

if __name__ == '__main__':
    # pass in the username of the account you want to download
    get_all_tweets("WansteadWomble")
But can we get all of the pictures in a single tweet?
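If I remember the v1.1 payloads right, entities['media'] only ever carries the first photo of a tweet; tweets with several photos expose the full list (up to four) under extended_entities['media']. A minimal sketch, assuming a Tweepy Status object (all_media_urls is just an illustrative name):

# Sketch (not part of the gist): collect every photo URL from one tweet.
# 'entities' holds at most one media item; 'extended_entities' lists up to four.
def all_media_urls(tweet):
    media = getattr(tweet, 'extended_entities', {})
    return [m['media_url'] for m in media.get('media', [])]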
Hi,
Is there any way I can get the hashtags as well, in a separate column?
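Not the author, but hashtags should be reachable the same way as media: if I read the v1.1 entities docs right, tweet.entities['hashtags'] is a list of dicts with a 'text' key. A sketch of the changed append (remember to add "hashtags" to the header row too):

# Sketch: add a hashtags column to each output row, assuming the v1.1
# entities layout where entities['hashtags'] is a list of {'text': ...}.
hashtags = " ".join(h['text'] for h in tweet.entities.get('hashtags', []))
outtweets.append([tweet.id_str, tweet.created_at,
                  tweet.text.encode("utf-8"),
                  tweet.entities['media'][0]['media_url'],
                  hashtags])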
It doesn't save to the CSV.
Just remove the 'b' from 'wb'; on Python 3 the csv module wants the file opened in text mode.
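In other words, on Python 3 the open call in the script becomes roughly:

# Python 3: open the CSV in text mode; the csv docs also recommend
# newline='' so no extra blank lines appear on Windows.
with open('%s_tweets.csv' % screen_name, 'w', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)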
Can't we use this to get the URL of a link shared in a tweet?
Hey, will this media method also download video URLs, or does it work only for images?
Looks like it won't. The script the author provided only shows the thumbnail of the video. To get the link of the video itself, you have to look under tweet.extended_entities["media"][0]["video_info"].
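Something along these lines should pull a direct video link. This is a sketch assuming the v1.1 extended_entities layout, where each variant is a dict with 'content_type', 'url' and, for mp4 variants, a 'bitrate' (best_video_url is a made-up helper name):

# Sketch: pick the highest-bitrate mp4 variant from a video tweet.
def best_video_url(tweet):
    info = tweet.extended_entities['media'][0]['video_info']
    mp4s = [v for v in info['variants'] if v['content_type'] == 'video/mp4']
    return max(mp4s, key=lambda v: v.get('bitrate', 0))['url']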
Now I am getting this error:
AttributeError: 'Status' object has no attribute 'entities'
Has there been a change in the API or something?
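Not sure about an API change, but you can make the loop shrug off such tweets instead of crashing by guarding the attribute access, e.g.:

# Defensive variant of the loop body: skip Status objects that carry
# no 'entities' attribute at all, rather than raising AttributeError.
for tweet in alltweets:
    entities = getattr(tweet, 'entities', None)
    if not entities or 'media' not in entities:
        continue
    print(entities['media'][0]['media_url'])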
Then you just download all of them with your favourite downloader, for example:
wget -i filename.csv
Where and how do I run the command "wget -i '%s_tweets.csv'" to download the pictures?
I'm a newbie and it does not work.
Thanks in advance
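One likely reason it fails: wget -i expects a plain list of URLs, one per line, but the CSV written by the script has four columns. Extracting the media_url column first should work; a sketch (Python 3, filenames are just examples):

# Sketch: turn the script's CSV into a plain URL list that wget -i accepts.
import csv

with open('WansteadWomble_tweets.csv', newline='') as src, \
        open('urls.txt', 'w') as dst:
    reader = csv.reader(src)
    next(reader)                  # skip the header row
    for row in reader:
        dst.write(row[3] + '\n')  # media_url is the fourth column

Then wget -i urls.txt downloads everything in that list.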
My edits to the code; it downloads the images as it goes:
#!/usr/bin/env python3
# encoding: utf-8

import tweepy  # https://github.com/tweepy/tweepy
import csv
import sys
import wget

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""

def get_all_tweets(screen_name):
    # Twitter only allows access to a user's most recent 3240 tweets with this method

    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=1)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print("getting tweets before %s" % (oldest))

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print("...%s tweets downloaded so far" % (len(alltweets)))

    # go through all found tweets and drop the ones with no images
    outtweets = []  # initialize master list to hold our ready tweets
    for tweet in alltweets:
        # not all tweets will have a media url, so let's skip those
        # (catch AttributeError too: a Status object may lack 'entities')
        try:
            print(tweet.entities['media'][0]['media_url'])
            wget.download(tweet.entities['media'][0]['media_url'])
        except (AttributeError, KeyError):
            # we don't want any entries without the media_url, so do nothing
            pass
        else:
            # got media_url, so add the tweet to the output
            outtweets.append([tweet.id_str, tweet.created_at, tweet.text, tweet.entities['media'][0]['media_url']])

    # write the csv
    with open('%s_tweets.csv' % screen_name, 'w', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text", "media_url"])
        writer.writerows(outtweets)

if __name__ == '__main__':
    # pass in the username of the account you want to download
    get_all_tweets("nameofparse")
Useful practice case for me, thanks a lot.