import datetime
import os
import urllib.request

import googleapiclient.discovery  # Requires: pip install google-api-python-client

# Script written by an LLM
# Set up API credentials - Created here (API Keys): https://console.cloud.google.com/apis/credentials
api_key = '' # Replace with your API key
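# Optional fallback (not in the original gist; the YOUTUBE_API_KEY variable name is just
# an example): read the key from the environment instead of hard-coding it.
api_key = api_key or os.environ.get('YOUTUBE_API_KEY', '')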
# Create a YouTube API client
youtube = googleapiclient.discovery.build('youtube', 'v3', developerKey=api_key)
# Specify the channel name
channel_name = '' # Replace with a channel name
# Request the channel ID using the channel name
channel_response = youtube.channels().list(
    part='id',
    forUsername=channel_name
).execute()
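# Note: forUsername only matches channels that still have a legacy username. As a
# fallback sketch (not part of the original gist), look the channel up through the
# search endpoint when nothing was returned, reshaping the result to match the
# channels().list response used below.
if not channel_response.get('items'):
    search_response = youtube.search().list(
        part='id',
        q=channel_name,
        type='channel',
        maxResults=1
    ).execute()
    channel_response = {'items': [{'id': search_response['items'][0]['id']['channelId']}]}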
# Retrieve the channel ID
channel_id = channel_response['items'][0]['id']
os.makedirs(channel_id, exist_ok=True)  # Don't fail if the directory already exists
# Define a time range for the search (note: these values are not currently used by the requests below)
published_before = datetime.datetime.now()
published_after = datetime.datetime(year=2000, month=1, day=1)  # Adjust as needed
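# Sketch (not in the original gist): to actually apply this range, compare each playlist
# item's snippet['publishedAt'] against published_after/published_before in the loop
# below and skip videos that fall outside it.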
# Request the uploads playlist ID of the channel
uploads_response = youtube.channels().list(
part='contentDetails',
id=channel_id
).execute()
# Retrieve the uploads playlist ID
uploads_playlist_id = uploads_response['items'][0]['contentDetails']['relatedPlaylists']['uploads']
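# (For most channels the uploads playlist ID is simply the channel ID with its 'UC' prefix
# swapped for 'UU', but reading it from contentDetails as above avoids relying on that
# convention.)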
# Request the list of videos uploaded to the channel (with pagination)
next_page_token = None
video_ids = []
while True:
    response = youtube.playlistItems().list(
        part='snippet',
        playlistId=uploads_playlist_id,
        maxResults=50,  # Adjust the number of results per page as per your requirements
        pageToken=next_page_token
    ).execute()

    # Retrieve video IDs from the current page
    for item in response['items']:
        video_ids.append(item['snippet']['resourceId']['videoId'])

    # Check if there are more pages
    if 'nextPageToken' in response:
        next_page_token = response['nextPageToken']
    else:
        break
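# Optional progress output (not in the original gist)
print(f'Collected {len(video_ids)} video IDs from the uploads playlist.')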
# Batch video retrieval
batch_size = 50 # Adjust the batch size as per your requirements
for i in range(0, len(video_ids), batch_size):
    batch_video_ids = video_ids[i:i + batch_size]
    response = youtube.videos().list(
        part='snippet',
        id=','.join(batch_video_ids)
    ).execute()
    for item in response['items']:
        video_id = item['id']
        # Pick the largest available thumbnail size ('maxres' could be prepended for full resolution)
        sizes = ['standard', 'high', 'medium', 'default']
        tsize = next((size for size in sizes if size in item['snippet']['thumbnails']), None)
        if tsize is None:
            continue  # No usable thumbnail entry for this video
        thumbnail_url = item['snippet']['thumbnails'][tsize]['url']
        thumbnail_file_name = f'{channel_id}/{video_id}.jpg'
        if not os.path.isfile(thumbnail_file_name):
            urllib.request.urlretrieve(thumbnail_url, thumbnail_file_name)
            print(f'Downloaded thumbnail for video {video_id}.')
            # Match the file's timestamps to the video's publish date
            # ('Z' is replaced so fromisoformat also works on Python < 3.11)
            published_at = datetime.datetime.fromisoformat(
                item['snippet']['publishedAt'].replace('Z', '+00:00')
            ).timestamp()
            os.utime(thumbnail_file_name, (published_at, published_at))
print('Thumbnail download completed.')
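# Usage sketch (the file name is just an example, not something the gist specifies):
#   1. Fill in api_key and channel_name above (the key needs the YouTube Data API v3 enabled).
#   2. Save as download_thumbnails.py and run: python download_thumbnails.py
# Thumbnails are written to a directory named after the channel ID.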