Abhay Parashar (Abhayparashar31), GitHub gists
from __future__ import division, print_function
# coding=utf-8
import os
import numpy as np
import tensorflow.keras
from PIL import Image, ImageOps
# Flask utils
from flask import Flask, request, render_template  # typical Flask utilities this comment refers to

# Key-logger snippet: record every key pressed
from pynput.keyboard import Key, Controller, Listener
import time

keyboard = Controller()
keys = []

def on_press(key):
    global keys
    keys.append(str(key).replace("'", ""))  # strip the quotes pynput puts around character keys
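A minimal sketch of wiring the listener up, assuming pynput's standard callback API (the Esc-to-stop choice is illustrative, not part of the original gist):

def on_release(key):
    if key == Key.esc:
        return False              # returning False stops the listener

with Listener(on_press=on_press, on_release=on_release) as listener:
    listener.join()               # block until Esc is released
print(keys)                       # everything typed while the listener ran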
def impute_nan(df, variable):
    df[variable + "_random"] = df[variable]
    ## Draw as many random non-null values as there are NaNs
    random_sample = df[variable].dropna().sample(df[variable].isnull().sum(), random_state=0)
    ## pandas needs matching indices in order to merge the datasets,
    ## so give the sample the index of the rows where NaN occurs
    random_sample.index = df[df[variable].isnull()].index
    df.loc[df[variable].isnull(), variable + '_random'] = random_sample
    ## Replace the original column with the imputed one and drop the helper
    df[variable] = df[variable + "_random"]
    df.drop(variable + "_random", axis=1, inplace=True)
import pandas as pd
df=pd.read_csv("data/titanic.csv",usecols=['Age','Cabin','Survived'])
df.isnull().mean()
df.dtypes
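With the function above, the Age column loaded here can be imputed in place; a quick before/after check:

print(df['Age'].isnull().sum())   # missing ages before imputation
impute_nan(df, 'Age')             # fill the NaNs with a random sample of observed ages
print(df['Age'].isnull().sum())   # 0 after imputation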
import requests
from bs4 import BeautifulSoup
import csv

######## SCRAPING QUOTES FROM ALL THE PAGES
pages = 10  ## Number of pages you want to scrape; quotes.toscrape.com numbers them from 1
for i in range(1, pages + 1):
    res = requests.get(f"http://quotes.toscrape.com/page/{i}/")
    soup = BeautifulSoup(res.text, "html.parser")
    ### Finding the page length (how many quotes this page holds)
    quotes = soup.select(".quote")
    print(f"Page {i}: {len(quotes)} quotes")
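The csv import above suggests the quotes were meant to be saved; a sketch under that assumption (the filename is illustrative; the .text and .author selectors match the site's markup):

with open('quotes.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['quote', 'author'])
    for q in quotes:  # quotes from the last page fetched above; move inside the loop to keep all pages
        writer.writerow([q.select_one('.text').get_text(strip=True),
                         q.select_one('.author').get_text(strip=True)])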
def amazon(book):
    book = book.replace(" ", '+')
    url = f'https://www.amazon.in/s?k={book}&ref=nb_sb_noss_2'  ## search URL built from the query
    print(url)
    res = requests.get(url, headers=headers)  ## browser-style headers defined below
    soup = BeautifulSoup(res.text, 'html.parser')
    names = soup.select(".a-size-medium")
    for i in range(len(names)):
        try:
            price = soup.select(".a-spacing-top-small .a-price-whole")[i].get_text().strip()
            print(names[i].get_text().strip(), price)
        except IndexError:
            continue  ## some listings carry no price element
from bs4 import BeautifulSoup
import requests
import csv

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}

def amazon(book, num):
    book = book.replace(" ", '+')
    lst = []
    for i in range(1, num + 1):
        ## Walk through the result pages one by one
        url = f'https://www.amazon.in/s?k={book}&page={i}&qid=1599544528&ref=sr_pg_{i}'
        res = requests.get(url, headers=headers)
        soup = BeautifulSoup(res.text, 'html.parser')
        names = soup.select(".a-size-medium")
        for j in range(len(names)):
            lst.append(names[j].get_text().strip())
    return lst
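A usage sketch for the multi-page version, saving the scraped titles with the csv module imported above (query and filename are illustrative):

titles = amazon('python programming', 3)  # scrape the first three result pages
with open('amazon_books.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['title'])
    for t in titles:
        writer.writerow([t])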
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import time
from selenium.webdriver.common.action_chains import ActionChains
PATH = 'chromedriver.exe'
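A minimal sketch of putting these imports to work; webdriver.Chrome(PATH) is the selenium 3 style that the bare chromedriver.exe path suggests, and the URL and selector are placeholders:

driver = webdriver.Chrome(PATH)           # selenium 4 would wrap PATH in a Service object
driver.get('http://quotes.toscrape.com')  # placeholder URL
## Wait up to 10 seconds for the first quote instead of sleeping blindly
first_quote = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.CLASS_NAME, 'quote'))
)
print(first_quote.text)
driver.quit()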
var sheetName = 'Sheet1'
var scriptProp = PropertiesService.getScriptProperties()
function initialSetup () {
var activeSpreadsheet = SpreadsheetApp.getActiveSpreadsheet()
scriptProp.setProperty('key', activeSpreadsheet.getId())
}
function doPost (e) {
  var lock = LockService.getScriptLock()
  lock.tryLock(10000) // serialise concurrent submissions
  try {
    var doc = SpreadsheetApp.openById(scriptProp.getProperty('key'))
    var sheet = doc.getSheetByName(sheetName)
    // Minimal completion: append the POSTed form fields as a new row
    sheet.appendRow(Object.keys(e.parameter).map(function (k) { return e.parameter[k] }))
    return ContentService.createTextOutput('success')
  } finally {
    lock.releaseLock()
  }
}
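Once the script is deployed as a web app, the endpoint accepts form posts; a minimal Python client sketch (the deployment URL and field names are placeholders):

import requests
SCRIPT_URL = 'https://script.google.com/macros/s/DEPLOYMENT_ID/exec'  # placeholder deployment URL
resp = requests.post(SCRIPT_URL, data={'name': 'Ada', 'email': 'ada@example.com'})
print(resp.status_code, resp.text)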