Sergio Lucero (sergiolucero)
💭 coding the days away
@sergiolucero
sergiolucero / slideshow.py
Last active January 31, 2019 16:30
bunch of pics to ppt
import glob, os, pptx, scipy.misc
from PIL import Image
size = 800,600
prs = pptx.Presentation()
for fn in glob.glob('*.jpg'):          # shrink every JPEG in the folder in place
    img = Image.open(fn)
    img.thumbnail(size, Image.ANTIALIAS)
    img.save(fn)
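The listing stops before any slides are built; a hedged sketch of the python-pptx side, dropping each resized image onto its own blank slide (layout index, position and output filename are assumptions):
from pptx.util import Inches
blank_layout = prs.slide_layouts[6]            # layout 6 of the default template is blank
for fn in glob.glob('*.jpg'):
    slide = prs.slides.add_slide(blank_layout)
    slide.shapes.add_picture(fn, Inches(1), Inches(1), width=Inches(8))
prs.save('slideshow.pptx')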
@sergiolucero
sergiolucero / train_conductor_world_map.py
Last active September 4, 2021 12:27
building a realistic Train Conductor World Map
import folium, random, pandas as pd
from urllib.request import urlopen
colors=['red','blue','green','orange','yellow']
URL = 'http://sergiolucero.carto.com/api/v2/sql?format=csv&q=SELECT%20*,ST_AsText(the_geom)%20AS%20g2%20FROM%20'
ZOOM_LEVEL = 5;TILES='cartodbdark_matter'
CARTO_DATABASE = 'tcw'
response = urlopen(URL+CARTO_DATABASE)
data = pd.read_csv(response)
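The listing cuts the gist off after the CSV download; a minimal sketch of drawing the rows on the dark-matter map, assuming the g2 column holds WKT points of the form 'POINT(lon lat)' as the SQL above suggests:
fmap = folium.Map(location=[0, 0], zoom_start=ZOOM_LEVEL, tiles=TILES)
for _, row in data.iterrows():
    lon, lat = row['g2'].replace('POINT(', '').rstrip(')').split()
    folium.CircleMarker([float(lat), float(lon)], radius=3,
                        color=random.choice(colors)).add_to(fmap)
fmap.save('tcw_map.html')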
@sergiolucero
sergiolucero / cplex_demo.py
Last active December 4, 2017 20:05
calling IBM CPLEX
from forestry import ForestryLinearProgram # this is project-specific
from pulp import PULP_CBC_CMD as CBC # PuLP: open-source LP modelling library bundling the CBC solver
from creds import BASE_URL, API_KEY # these are used to id with IBM
from docloud.job import JobClient
my_problem = ForestryLinearProgram('forestry_test.xlsx') # input file contains all relevant tree data
#my_problem.solve(CBC) # this is how we used to work, until our problems grew too big for open-source
my_problem.save_to_MPS('forestry_LP.mps') # here we export to a format that IBM CPLEX can recognize
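The listing ends at the MPS export; a hedged sketch of handing that file to the DOcplexcloud service through the JobClient imported above (keyword names follow IBM's docloud examples, output filenames are placeholders):
client = JobClient(BASE_URL, API_KEY)
resp = client.execute(input=['forestry_LP.mps'], output='solution.json',
                      log='solver.log')       # blocks until the cloud CPLEX run finishes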
@sergiolucero
sergiolucero / fplot_clusters.py
Created September 1, 2017 12:12
Plotting bike clusters
import folium
from util import zload
import pandas as pd
cluster_data = zload('Labels_16.il')
loc_data = pd.read_excel('georef.xlsx')
loc_dict = loc_data.T.to_dict()   # station index -> {column: value} lookup
centroid = (loc_data.lat.median(),loc_data.lon.median())
bikemap = folium.Map(location=centroid,tiles='cartodbpositron',zoom_start=14)
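The gist is truncated after the base map; a sketch of how the cluster labels could be drawn, assuming cluster_data lines up row-for-row with georef.xlsx and using a small hypothetical palette:
palette = ['red', 'blue', 'green', 'purple', 'orange']
for ix, row in loc_data.iterrows():
    folium.CircleMarker([row.lat, row.lon], radius=4,
                        color=palette[cluster_data[ix] % len(palette)],
                        fill=True).add_to(bikemap)
bikemap.save('bike_clusters.html')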
@sergiolucero
sergiolucero / copscraping.py
Created September 24, 2017 15:23
scraping from the cops
from selenium import webdriver
url='http://consultawebvehiculos.carabineros.cl/index.php'
fd = webdriver.Firefox()
plates = ['CZJB81','BDPW78']
sections = ['txtLetras','txtNumeros1','txtNumeros2']
def check_robo(patente):
    fd.get(url) # point to the starting page again
    patsplit = [patente[:2], patente[2:4], patente[4:]]
    for ix, sec in enumerate(sections):
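        # the listing truncates the gist here; a hedged completion of the loop,
        # typing each slice of the plate into its matching form field
        fd.find_element_by_name(sec).send_keys(patsplit[ix])
    # generic form submit; the real gist may click a named button instead
    fd.find_element_by_tag_name('form').submit()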
@sergiolucero
sergiolucero / georef.py
Created September 24, 2017 16:02
basic georeferencing
import googlemaps
GMAPS_API = 'AIzaSyBjB7GbQ2TL2SUW989uu7ZoP6S0bxsv3t8' # EKHOS July 2017
gmaps = googlemaps.Client(key=GMAPS_API)
gref = gmaps.geocode('Emilio Vaisse 564, Providencia')
print(gref[0]['geometry'])
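A short follow-up pulling the coordinates out of the first result (standard googlemaps geocode response layout):
loc = gref[0]['geometry']['location']
print(loc['lat'], loc['lng'])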
@sergiolucero
sergiolucero / airquality.py
Created September 26, 2017 16:15
openAQ air quality
import openaq
parameters = ['co', 'no2', 'o3', 'pm10', 'pm25', 'so2']
api = openaq.OpenAQ()
country_list = api.countries(df=True)
print('MONITORING %d locations worldwide' %(country_list.locations.sum()))
for cid, cdata in country_list.iterrows():
    ctry = cdata['name']   # item access: .name on an iterrows row is the row index, not the country
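    # the listing cuts the gist off here; a hedged continuation pulling recent
    # PM2.5 readings per country via py-openaq's measurements endpoint
    pm25 = api.measurements(country=cdata['code'], parameter='pm25', df=True)
    print(ctry, len(pm25), 'pm25 readings')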
@sergiolucero
sergiolucero / plot_parques.py
Last active September 27, 2017 14:55
parques de santiago
import geopandas as gp
import folium
# Shapefile sources... http://www.ide.cl/descarga/capas.html
# src: https://ocefpaf.github.io/python4oceanographers/blog/2015/02/02/cartopy_folium_shapefile/
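The gist stops at the imports and source links; a minimal sketch of the plotting step, assuming a parks shapefile downloaded from the IDE portal (the filename and the Santiago centre point are placeholders):
parques = gp.read_file('parques_santiago.shp').to_crs(epsg=4326)   # hypothetical filename
parquemap = folium.Map(location=[-33.45, -70.66], zoom_start=12, tiles='cartodbpositron')
folium.GeoJson(parques).add_to(parquemap)
parquemap.save('parques.html')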
@sergiolucero
sergiolucero / pandareader.py
Created October 2, 2017 14:26
Panda dreams
import pandas as pd
from pandasqlutils import * # to easily cross tables
bikes = pd.read_url('http://quant.cl/db/bikes') # serving from quant via sqlite
airquality = pd.read_url('s3://amazonxyz.aws.com/sdakjldajds') # using an S3 bucket
weather = pd.read_url('quant.cl/postgres/weather') # running on a docker (Rad?)
airvsclimate = cross(airquality, weather)
print(airvsclimate.head())
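pd.read_url is not part of pandas (hence the "dreams" of the title); a sketch of what the same three reads look like with the real pandas API, keeping the gist's placeholder URLs and an assumed timestamp join key:
import sqlite3
bikes = pd.read_sql('SELECT * FROM bikes', sqlite3.connect('bikes.db'))      # sqlite file
airquality = pd.read_csv('s3://amazonxyz.aws.com/sdakjldajds')               # S3 object (needs s3fs)
weather = pd.read_sql_table('weather', 'postgresql://quant.cl/weather')      # postgres via SQLAlchemy
airvsclimate = airquality.merge(weather, on='timestamp')                     # hypothetical join key
print(airvsclimate.head())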
@sergiolucero
sergiolucero / pullwikipop.py
Created October 14, 2017 19:48
pull population data from Wikipedia
import wikipedia
CITIES=['Paris','Barcelona','Tokyo', 'New York City','Amsterdam','Copenhagen','San Francisco']
AMBIGUOUS_CITIES = ['Santiago']
for city in CITIES:
    citywiki = wikipedia.page(city)
    cwsum = citywiki.summary
    poploc = cwsum.index('population') # first and only? use re!
    print(city, cwsum[poploc:poploc+30])
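    # a hedged sketch of the regex route the comment hints at: grab the first
    # number that follows 'population' in the summary (pattern is an assumption)
    import re                          # would normally sit with the imports at the top
    m = re.search(r'population[^\d]*([\d,.]+)', cwsum, flags=re.IGNORECASE)
    if m:
        print(city, 'population ~', m.group(1))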