---
geometry: margin=1in
fontsize: 12pt
linkcolor: cyan
urlcolor: cyan
...
_Leadership Study Group: 2019-10-cf_
- https://media.giphy.com/media/FjfMN9MwuqvJe/giphy.gif - suspect is hatless, I repeat hatless
- https://media.giphy.com/media/l0ExpaDR2IOTB2dAQ/giphy.gif - Dabbing Mr. Peanut
- http://www.reactiongifs.com/r/trmp1.gif - Yeah Right Trump Faces
- https://i.imgur.com/7XHSM7K.gif - Shitposting loudly
- https://www.youtube.com/watch?v=DtRNg5uSKQ0 - fix it
- http://i.imgur.com/GieYD.gif - well, hello there game changer
- http://i.imgur.com/VF1p5XN.gif
- https://i.imgur.com/eSVPDfw.gif - Parakeet explosion
- https://media3.giphy.com/media/BWjTRoBsEKnII/giphy-downsized.gif - Top Men
- https://i.imgur.com/MgtuAZQ.gif - Cold blooded
- soilmates
import csv
from typing import Callable

def csv_reader(filename: str, operation: Callable) -> None:
    """Apply `operation` to every data row of a CSV file."""
    with open(filename, 'rt', newline='') as f:
        data = csv.reader(f)
        next(data, None)  # skip the first row, it's a header
        for row in data:
            operation(row)
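A minimal usage sketch (data.csv is a hypothetical file; print stands in for any per-row handler):

csv_reader('data.csv', print)  # hypothetical file: prints every row after the header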
const fs = require('fs');
const path = require('path');
const csv = require('fast-csv');

fs.createReadStream(path.resolve(__dirname, 'COVID-19/csse_covid_19_data/csse_covid_19_daily_reports/', '03-28-2020.csv'))
  .pipe(csv.parse({ headers: true }))
  .on('error', error => console.error(error))
  .on('data', row => {
    const data = {}
    // the JHU dataset renamed its columns in March 2020, so fall back to the older spelling
    data.province = row['Province_State'] || row['Province/State']
  })
  .on('end', rowCount => console.log(`Parsed ${rowCount} rows`));
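The same header fallback translates directly to Python's csv module; a sketch reading the file named above:

import csv

with open('COVID-19/csse_covid_19_data/csse_covid_19_daily_reports/03-28-2020.csv', newline='') as f:
    for row in csv.DictReader(f):
        # try both spellings of the renamed column
        province = row.get('Province_State') or row.get('Province/State')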
import express from 'express'

const app = express()

let myTimer = null
let online = false
const timeout = 30 * 60 * 1000 // number of milliseconds in 30 minutes

// each ping marks the service online and restarts the 30-minute countdown
app.post('/pingme', (req, res) => {
  online = true
  clearTimeout(myTimer)
  myTimer = setTimeout(() => { online = false }, timeout) // no ping within 30 minutes: offline
  res.sendStatus(200)
})
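A client just needs to POST /pingme more often than every 30 minutes to stay marked online. A sketch in Python (assuming the app is started with app.listen(3000); the URL and interval are illustrative):

import time
import urllib.request

# hypothetical client: ping every 10 minutes so the server's 30-minute timer never fires
while True:
    urllib.request.urlopen(urllib.request.Request('http://localhost:3000/pingme', method='POST'))
    time.sleep(10 * 60)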
- https://www.udemy.com/course/2019-build-an-simple-online-examination-system-with-php7/
- https://www.udemy.com/course/3d-printing-ultimate-workshop-and-full-step-by-step-guide/
- https://www.udemy.com/course/android-application-engineer-practice-exam-for-2018/?couponCode=CF028BE8E99EF5E7EF05
- https://www.udemy.com/course/automate/?couponCode=NOV2019FREE
- https://www.udemy.com/course/aws-certified-security-specialty-2019-exams/?couponCode=438CDFC92EBF1CD5A88A
- https://www.udemy.com/course/aws-certified-solutions-architect-professional-2019-exams/?couponCode=8FBFF6E5040CC97542B7
- https://www.udemy.com/course/backtowar-photoshop-retouching/
- https://www.udemy.com/course/bootstrap-4-responsive-design-mobile-first-desde-cero/
- https://www.udemy.com/course/build-a-streaming-twitter-filter-with-python-and-redis/
import re
from urllib.request import urlretrieve
from bs4 import BeautifulSoup

with open('index.html', 'r') as page:
    page_text = page.read()
soup = BeautifulSoup(page_text, 'html.parser')
pdf_links = soup.find_all('a', href=re.compile(r'\.pdf'))  # anchors whose href contains ".pdf"
for z in pdf_links:
    url = z['href']
    urlretrieve(url, url.split('/')[-1])  # assumes absolute URLs; saves each PDF under its own name
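The loop above assumes absolute hrefs; if index.html links its PDFs relatively, each href needs resolving against the page's base URL first. A sketch (the base URL is hypothetical):

from urllib.parse import urljoin

base = 'https://example.com/docs/'  # hypothetical URL the HTML was fetched from
for z in pdf_links:
    url = urljoin(base, z['href'])  # leaves absolute URLs untouched
    urlretrieve(url, url.split('/')[-1])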
const _ = require('lodash')

function filterMails(mails, stat) {
  let matchingPredicate = _.filter(mails, (mail) => {
    // a predicate is a space-separated list of tests, e.g. "count>=5" (field names depend on the mail objects)
    let test = mail.predicate.split(' ')
    stat.cases = _.map(test, (filter) => {
      // split "field<op>value" into [field, comparator, value]
      let operation = filter.split(/([=<>]{1,2})/g)
      if (operation.length === 3) {
        let mainNum
        let comparator
        if (operation[0] === 'lastEntry') {
          // TODO: parse dates
        }
      }
    })
  })
}