# note: this is Python 2 code (urllib2, print statements)
from bs4 import BeautifulSoup
import urllib2
import os
import argparse
import sys
import json

# adapted from http://stackoverflow.com/questions/20716842/python-download-images-from-google-image-search

def get_soup(url, header):
    return BeautifulSoup(urllib2.urlopen(urllib2.Request(url, headers=header)), 'html.parser')

def main(args):
    parser = argparse.ArgumentParser(description='Scrape Google images')
    parser.add_argument('-s', '--search', default='bananas', type=str, help='search term')
    parser.add_argument('-n', '--num_images', default=10, type=int, help='num images to save')
    parser.add_argument('-d', '--directory', default='/Users/gene/Downloads/', type=str, help='save directory')
    args = parser.parse_args()
    query = args.search
    max_images = args.num_images
    save_directory = args.directory
    query = '+'.join(query.split())
    url = "https://www.google.co.in/search?q=" + query + "&source=lnms&tbm=isch"
    header = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
    soup = get_soup(url, header)
    # each rg_meta div holds a JSON blob with the original image URL ("ou") and file type ("ity")
    ActualImages = []
    for a in soup.find_all("div", {"class": "rg_meta"}):
        meta = json.loads(a.text)
        ActualImages.append((meta["ou"], meta["ity"]))
    for i, (img, Type) in enumerate(ActualImages[0:max_images]):
        try:
            # was headers={'User-Agent': header}, which nested the whole dict inside itself
            req = urllib2.Request(img, headers=header)
            raw_img = urllib2.urlopen(req).read()
            ext = Type if Type else "jpg"
            with open(os.path.join(save_directory, "img_" + str(i) + "." + ext), 'wb') as f:
                f.write(raw_img)
        except Exception as e:
            print "could not load : " + img
            print e

if __name__ == '__main__':
    from sys import argv
    try:
        main(argv)
    except KeyboardInterrupt:
        pass
    sys.exit()
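For anyone on Python 3, where urllib2 no longer exists, here is a minimal sketch of the same fetch-and-save logic ported to urllib.request. It still assumes Google's old rg_meta markup, which, as the comments below note, has since changed, so treat it as a porting illustration rather than a working scraper:

import json
import os
import urllib.request
from bs4 import BeautifulSoup

HEADER = {'User-Agent': 'Mozilla/5.0'}

def get_soup(url):
    req = urllib.request.Request(url, headers=HEADER)
    return BeautifulSoup(urllib.request.urlopen(req), 'html.parser')

def download_images(query, max_images, save_directory):
    url = 'https://www.google.com/search?q=' + '+'.join(query.split()) + '&tbm=isch'
    soup = get_soup(url)
    # each rg_meta div held a JSON blob with the original URL ('ou') and file type ('ity')
    for i, div in enumerate(soup.find_all('div', {'class': 'rg_meta'})):
        if i >= max_images:
            break
        meta = json.loads(div.text)
        link, ext = meta['ou'], meta.get('ity') or 'jpg'
        raw_img = urllib.request.urlopen(urllib.request.Request(link, headers=HEADER)).read()
        with open(os.path.join(save_directory, 'img_%d.%s' % (i, ext)), 'wb') as f:
            f.write(raw_img)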
@hoangphuc1998 the original program also downloaded thumbnails instead of images, I believe
from selenium import webdriver
import time
import requests
import shutil
import os
import argparse

def save_img(inp, img, i, directory):
    try:
        filename = inp + str(i) + '.jpg'
        response = requests.get(img, stream=True)
        image_path = os.path.join(directory, filename)
        with open(image_path, 'wb') as file:
            shutil.copyfileobj(response.raw, file)
    except Exception:
        pass

def find_urls(inp, url, driver, directory):
    driver.get(url)
    # scroll the results page repeatedly so lazy-loaded thumbnails appear,
    # clicking the "Show more results" button (.mye4qd) whenever it is present
    for _ in range(500):
        driver.execute_script("window.scrollBy(0,10000)")
        try:
            driver.find_element_by_css_selector('.mye4qd').click()
        except:
            continue
    # click each thumbnail, then read the full-size URL from the preview pane
    for j, imgurl in enumerate(driver.find_elements_by_xpath('//img[contains(@class,"rg_i Q4LuWd")]')):
        try:
            imgurl.click()
            img = driver.find_element_by_xpath('//body/div[2]/c-wiz/div[3]/div[2]/div[3]/div/div/div[3]/div[2]/c-wiz/div[1]/div[1]/div/div[2]/a/img').get_attribute("src")
            save_img(inp, img, j, directory)
            time.sleep(1.5)
        except:
            pass

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Scrape Google images')
    parser.add_argument('-s', '--search', default='bananas', type=str, help='search term')
    parser.add_argument('-d', '--directory', default='../Downloads/', type=str, help='save directory')
    args = parser.parse_args()
    driver = webdriver.Chrome('/path_to_chromedriver')
    directory = args.directory
    inp = args.search
    if not os.path.isdir(directory):
        os.makedirs(directory)
    url = 'https://www.google.com/search?q=' + str(inp) + '&source=lnms&tbm=isch&sa=X&ved=2ahUKEwie44_AnqLpAhUhBWMBHUFGD90Q_AUoAXoECBUQAw&biw=1920&bih=947'
    find_urls(inp, url, driver, directory)
This script will download all the images at a larger size.
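For anyone trying it, a typical invocation (after editing the chromedriver path inside the script) would be something like `python scrape_images.py -s "golden retriever" -d ./dogs/`, where scrape_images.py is whatever you named the file.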
@hoangphuc1998 doesn't work for me; it just clicks through the images but doesn't download anything
Quick install steps would be:
1. Install conda and activate an environment.
2. pip install selenium
3. Install chromedriver and extract it somewhere.
4. Edit the script and provide the chromedriver path in the arguments of webdriver.Chrome() (see the sketch after this list).
5. Change the parameters for search and directory (parser.add_argument('-s')) or pass them when executing the script.
6. Execute the script.
7. On a Mac, go to System Preferences > Security & Privacy and allow chromedriver to run.
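For step 4, a minimal sketch of pointing Selenium at the extracted binary ('/path/to/chromedriver' is a placeholder; newer Selenium releases want the path wrapped in a Service object rather than passed directly):

# Minimal driver setup sketch; replace the placeholder path with
# wherever you extracted the chromedriver binary.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service

service = Service('/path/to/chromedriver')
driver = webdriver.Chrome(service=service)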
@AnnBkrv try specifying the right directory using the --directory parameter
@akshat-khare I did. I tried to run a portion of this script in a notebook to debug it: the image gets clicked, but then the URL is not retrieved, so the image can't be downloaded.
Working fine, guys! (It is necessary to install selenium and chromedriver.exe first.)
Great stuff. Worked just fine. Please add it to a repo or so.
Can anyone explain what happens inside the second for loop of find_urls, please? I tried to run the code and the save_img function is never called, yet you say it works 'just fine'.
Thanks.
Hey guys! Does anyone here know how to keep this code running infinitely?
You could schedule the job to run every hour or so using Windows Task Scheduler, cron, Airflow, the schedule package, etc.
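For example, with the third-party schedule package (a sketch; scrape_once is a hypothetical wrapper around the scraper's entry point):

# Hedged sketch using the `schedule` package (pip install schedule).
import time
import schedule

def scrape_once():
    # call find_urls(...) / the scraper's main entry point here
    print("scraping...")

schedule.every().hour.do(scrape_once)

while True:
    schedule.run_pending()
    time.sleep(60)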
Yeah, that's an idea. I'm curious, though: will Google return new sets of images? That's a concern. Or perhaps I can have the process write to the same directory and skip any file that has already been saved, by checking its URL, as sketched below.
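One way to make repeated runs idempotent is to derive the filename from the image URL and skip anything that already exists (a sketch; save_if_new is my own naming, not part of the script above):

# Sketch: name files by a hash of the source URL so repeated runs
# skip images that were already downloaded.
import hashlib
import os
import requests

def save_if_new(url, directory):
    name = hashlib.sha1(url.encode('utf-8')).hexdigest() + '.jpg'
    path = os.path.join(directory, name)
    if os.path.exists(path):
        return  # already downloaded on a previous run
    response = requests.get(url, stream=True)
    with open(path, 'wb') as f:
        for chunk in response.iter_content(8192):
            f.write(chunk)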
Hey there,
Would any of you be able to change this code so that, after clicking on the first image of the search, it clicks on one of the similar images Google suggests, and keeps doing this for every image?
# this code is working now..
from selenium import webdriver
import urllib.request
import os
import time

driver = webdriver.Chrome('D:\\webdrivers\\chromedriver.exe')
default_path = "D:\\google_images"

def start(country, job_name):
    images = driver.find_elements_by_css_selector(".rg_i.Q4LuWd")
    _path = os.path.join(default_path, country + "\\" + job_name)
    try:
        os.mkdir(_path)
    except:
        pass
    os.chdir(_path)
    count = 0
    for image in images:
        count += 1
        if not image.get_attribute('src') == "":
            print(str(count))
            try:
                time.sleep(3)
                image.click()
                time.sleep(5)
                try:
                    big_image = driver.find_element_by_css_selector("#Sva75c > div > div > div.pxAole > div.tvh9oe.BIB1wf > c-wiz > div > div.OUZ5W > div.zjoqD > div > div.v4dQwb > a > img")
                    # url = driver.execute_script("return document.querySelector('.d87Otf').parentElement.parentElement.parentElement.querySelector(\"[jsname='HiaYvf']\").src")
                    url = big_image.get_attribute("src")
                    print(url)
                    try:
                        urllib.request.urlretrieve(url, str(count) + "_car.jpg")
                    except:
                        print("download error")
                except:
                    print("javascript error")
            except:
                print("could not click")

# uk_very_big_cars_on_road must be set to a Google Images search-results URL first
driver.get(uk_very_big_cars_on_road)
start("uk_big", "uk_very_big_cars_on_road_01")
I was able to use hoangphuc1998's method successfully earlier in the year, but it has started getting only the downsized thumbnail images.
Using resatarikan's version gets me some of the full-size images, but it's still inconsistent.
Maybe Google changed how images are loaded, but I noticed that driver.find_element_by_xpath('//body/div[2]/c-wiz/div[3]/div[2]/div[3]/div/div/div[3]/div[2]/c-wiz/div[1]/div[1]/div/div[2]/a/img').get_attribute("src") will now return data:image/png;base64,{long_random_string}.
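Those data: URIs are the inlined thumbnails; they can still be saved by decoding the base64 payload instead of fetching a URL (a sketch; save_data_uri is my own naming):

# Sketch: save an inlined thumbnail from a data:image/...;base64,... src.
import base64

def save_data_uri(src, path):
    header, payload = src.split(',', 1)  # 'data:image/png;base64', '<payload>'
    assert ';base64' in header
    with open(path, 'wb') as f:
        f.write(base64.b64decode(payload))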
Hello, when I run this program (the script which downloads all images at a larger size), Google opens and scrolls, but an error terminates the program (see screenshot). I've had this problem several times when trying other code. Could someone please help me?
[screenshot: Capture d'écran 2023-01-04 115440]
Hello,
the find_element_by_* commands are deprecated:
https://stackoverflow.com/questions/69875125/find-element-by-commands-are-deprecated-in-selenium
You have to use find_elements(By.XPATH, '//......') instead.
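The old helper calls map one-to-one onto the new locator API (a sketch, reusing the selectors from the script above):

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()

# old, deprecated style:
#   driver.find_elements_by_css_selector('.mye4qd')
#   driver.find_elements_by_xpath('//img[contains(@class,"rg_i Q4LuWd")]')
# new style: same locator string, plus a By.* constant as the first argument
buttons = driver.find_elements(By.CSS_SELECTOR, '.mye4qd')
thumbnails = driver.find_elements(By.XPATH, '//img[contains(@class,"rg_i Q4LuWd")]')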
Thank you for your reply, but it didn't work:
for j, imgurl in enumerate(driver.find_element(By.XPATH,'//img[contains(@Class,"rg_i Q4LuWd")]')):
NameError: name 'By' is not defined
You have to import Selenium's By module.
You also have to update the class name in the XPath.
Thanks, I will try!
Hello, I tried to update the XPath class but it doesn't change anything. I did some research, but I'm having trouble understanding. I had to change some things; now there is no more error, but it still doesn't work. Google opens the page, scrolls, and that's it. Do you think you could try it on your side with the link and tell me if it works? And, if so, tell me how you did it, please?
Here is the link (https://www.google.com/search?q=french+pedestrian+light&rlz=1C1CHBF_frFR1008FR1008&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiGgui61IX9AhVaQaQEHeS4CkgQ_AUoAXoECAEQAw&biw=1536&bih=714&dpr=1.25).
`
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
import time
import requests
import shutil
import os
import argparse
def save_img(inp,img,i, directory):
try:
filename = inp+str(i)+'.jpg'
response = requests.get(img,stream=True)
image_path = os.path.join(directory, filename)
with open(image_path, 'wb') as file:
shutil.copyfileobj(response.raw, file)
except Exception:
pass
def find_urls(inp,url,driver, directory):
driver.get(url)
img_urls = driver.find_elements(By.XPATH,'//img[contains(@Class,"rg_i yDmH0d")]')
#img_urls = driver.find_elements(By.XPATH,'//*[@id="yDmH0d"]')
for _ in range(500):
driver.execute_script("window.scrollBy(0,10000)")
try:
driver.find_element_by_css_selector('.mye4qd').click()
except:
continue
for j, imgurl in enumerate(img_urls):
try:
img = imgurl.get_attribute('src')
save_img(inp,img,j, directory)
time.sleep(1.5)
except:
pass
s=Service('C:\chromedriver.exe')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Scrape Google images')
parser.add_argument('-s', '--search', default='banana', type=str, help='search term')
parser.add_argument('-d', '--directory', default='../Downloads/', type=str, help='save directory')
args = parser.parse_args()
driver = webdriver.Chrome(service=s)
directory = args.directory
inp = args.search
if not os.path.isdir(directory):
os.makedirs(directory)
url = 'https://www.google.com/search?q=french+pedestrian+light&rlz=1C1CHBF_frFR1008FR1008&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiGgui61IX9AhVaQaQEHeS4CkgQ_AUoAXoECAEQAw&biw=1536&bih=714&dpr=1.25'
find_urls(inp,url,driver, directory)
`
Sorry, I tried to keep the indentation of the code but it doesn't work
That's because it looks like you used only one backtick above and below instead of three. Also, a tip: after the first three backticks, identify the language so that you get syntax highlighting, like so:
```python
<your code here>
```
Your code would look like this (plus my best guess at the correct indentation, with the deprecated find_element_by_* calls, the lowercase @class in the XPath, and the __name__ == "__main__" guard fixed):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
import time
import requests
import shutil
import os
import argparse

def save_img(inp, img, i, directory):
    try:
        filename = inp + str(i) + '.jpg'
        response = requests.get(img, stream=True)
        image_path = os.path.join(directory, filename)
        with open(image_path, 'wb') as file:
            shutil.copyfileobj(response.raw, file)
    except Exception:
        pass

def find_urls(inp, url, driver, directory):
    driver.get(url)
    # note: the thumbnails are collected before scrolling, so only the
    # initially loaded grid is seen
    img_urls = driver.find_elements(By.XPATH, '//img[contains(@class,"rg_i yDmH0d")]')
    # img_urls = driver.find_elements(By.XPATH, '//*[@id="yDmH0d"]')
    for _ in range(500):
        driver.execute_script("window.scrollBy(0,10000)")
        try:
            driver.find_element(By.CSS_SELECTOR, '.mye4qd').click()
        except:
            continue
    for j, imgurl in enumerate(img_urls):
        try:
            img = imgurl.get_attribute('src')
            save_img(inp, img, j, directory)
            time.sleep(1.5)
        except:
            pass

s = Service(r'C:\chromedriver.exe')

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Scrape Google images')
    parser.add_argument('-s', '--search', default='banana', type=str, help='search term')
    parser.add_argument('-d', '--directory', default='../Downloads/', type=str, help='save directory')
    args = parser.parse_args()
    driver = webdriver.Chrome(service=s)
    directory = args.directory
    inp = args.search
    if not os.path.isdir(directory):
        os.makedirs(directory)
    url = 'https://www.google.com/search?q=french+pedestrian+light&rlz=1C1CHBF_frFR1008FR1008&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiGgui61IX9AhVaQaQEHeS4CkgQ_AUoAXoECAEQAw&biw=1536&bih=714&dpr=1.25'
    find_urls(inp, url, driver, directory)
Thank you. This works perfectly. But it only downloads thumbnails, not the original images.
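A likely cause: the grid's img elements only carry the inlined base64 thumbnails, and the full-size URL only appears in the side panel after a click, once it has finished loading. A hedged sketch of waiting for a real http(s) src before saving (the CSS selector is a guess at current markup and will break when Google changes its class names):

# Sketch: click a thumbnail, then wait until the preview pane's src
# switches from a base64 thumbnail to a real http(s) URL.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait

def full_size_url(driver, thumbnail, timeout=10):
    thumbnail.click()
    def loaded(d):
        # 'img.sFlh5c.pT0Scc.iPVvYb' is a guess at the preview-pane image
        img = d.find_element(By.CSS_SELECTOR, 'img.sFlh5c.pT0Scc.iPVvYb')
        src = img.get_attribute('src')
        return src if src and src.startswith('http') else False
    return WebDriverWait(driver, timeout).until(loaded)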