Skip to content

Instantly share code, notes, and snippets.

View StrikingLoo's full-sized avatar
😄

Luciano StrikingLoo

😄
View GitHub Profile
-- | Tail-recursive membership test carrying an accumulator flag.
--   The first clause short-circuits: once the element has been found,
--   the rest of the list is never inspected (the original walked the
--   entire list even after a match, and diverged on infinite lists).
linear_search_tr :: (Eq a) => [a] -> a -> Bool -> Bool
linear_search_tr _ _ True = True
linear_search_tr [] _ present = present
linear_search_tr (first_e:rest) e present = linear_search_tr rest e (present || first_e == e)
-- | True iff the element occurs in the list; delegates to the
--   accumulator-passing worker with the flag initially False.
linear_search :: (Eq a) => [a] -> a -> Bool
linear_search xs x = linear_search_tr xs x False
# not really tail recursive!
def factorial(n):
    """Return n! by naive recursion (the recursive call is not in tail position)."""
    return 1 if n == 0 else n * factorial(n - 1)
#tail recursive version
def factorial(n, acum=1):
if n==0:
# Rows of df where x > 5, selected via .loc with a boolean mask.
# NOTE(review): the original comment claimed this is a writable view on df;
# boolean-mask selection generally returns a copy in pandas — confirm before
# relying on write-through (SettingWithCopy territory).
df2 = df.loc[df['x'] > 5]
# Rows where x is 0, 1, 2, 3 or 4.
# Fixed: the original `df.x.isin(range(4))` produced a boolean *mask* (a
# Series of bools), not rows, and covered only 0-3. Later code treats df3
# as a frame with a `salary` column, so actually select the rows, and use
# range(5) to match the stated 0-4 intent.
df3 = df.loc[df['x'].isin(range(5))]
# Rows where x > 5 via plain boolean indexing — same selection as df2.
# NOTE(review): this is not "read-only" as originally claimed; it is an
# ordinary copy in modern pandas.
df4 = df[df['x'] > 5]
def get_big_mean():
    """Mean of the salary column of the dask frame; .compute() forces evaluation."""
    salary_column = dfn['salary']
    return salary_column.mean().compute()
def get_big_mean_old():
    """Mean of the salary column computed eagerly on the pandas frame df3."""
    return df3['salary'].mean()
def get_big_max():
    """Maximum of the salary column of the dask frame; .compute() forces evaluation."""
    salary_column = dfn['salary']
    return salary_column.max().compute()
def get_big_max_old():
    """Maximum of the salary column computed eagerly on the pandas frame df3."""
    return df3['salary'].max()
def f(x):
    """Toy pseudo-random mapping: an affine transform of x reduced mod 7."""
    return (x * 13 + 5) % 7
def apply_random_old():
    """Add a 'random' column to the pandas frame df3 by mapping f over salaries."""
    salaries = df3['salary']
    df3['random'] = salaries.apply(f)
def apply_random():
    """Add a 'random' column to dfn by mapping f over salaries, materialized via .compute()."""
    mapped = dfn['salary'].apply(f)
    dfn['random'] = mapped.compute()
import scrapy
#must inherit from scrapy.Spider
class KittensSpider(scrapy.Spider):
#can be any string, will be used to call from the console
name = "kitten_getter"
# This method must be in the spider,
# and will be automatically called by the crawl command.
def start_requests(self):
# Just giving the images a little bit of format, and shrinking them.
def html_img_tag(img_url):
    """Wrap a URL in a small (30% x 30%) HTML img tag followed by a line break."""
    opening = "<img width='30%' height='30%' src='"
    closing = "' > </img><br/> "
    return opening + img_url + closing
# Scrapy response callback: collect every <a href> on the page that looks
# like an image link and accumulate it as an HTML <img> tag in `final`.
# NOTE(review): `final` is built but never returned, yielded, or written
# here — the tail of this method appears to be missing from this snippet.
def parse(self,response):
    final = ''
    # every href attribute of every anchor element in the response
    for i in response.xpath('//a/@href'):
        img_url = i.extract()
        # keep only links ending in common image extensions
        if img_url.endswith('.jpg') or img_url.endswith('.png'):
            final+= html_img_tag(img_url)
def ascii_encode(img_url):
    """Return an ASCII-only bytes version of the URL: NFKD-decompose, then drop any character outside ASCII."""
    decomposed = unicodedata.normalize('NFKD', img_url)
    return decomposed.encode('ascii', 'ignore')
# Fetch the body of every image URL extracted from the response.
# NOTE(review): `get_image_urls` and `self.index` are not defined in this
# snippet, and `img_data` is computed but never saved — the file-writing
# tail of this method appears to be truncated.
def download_pictures(self, response):
    image_urls = get_image_urls(response)
    for img_url in image_urls:
        # running counter — presumably used to name downloaded files; TODO confirm
        self.index+=1
        print(img_url)
        # sanitize to ASCII bytes before the HTTP request
        ascii_url = ascii_encode(img_url)
        img_data = requests.get(ascii_url).content
-- top defenders --
Dragon: 0.004
Fairy: 0.008
Fire: 0.011
Normal: 0.012
Water: 0.012
Steel: 0.015
Flying: 0.015
Psychic: 0.017
Ghost: 0.019
# Expected Dealt Damage
Normal : 1160
Dragon : 1236
Poison : 1239
Ghost : 1258
Steel : 1260
Psychic : 1263
Grass : 1264
Bug : 1279
Dark : 1313