Skip to content

Instantly share code, notes, and snippets.

@evmcheb
evmcheb / suburbs.py
Created July 4, 2019 13:13
Estimates each suburb's boundary as a bounding square (min/max latitude and longitude)
import json

# Load the WA suburb-boundary GeoJSON.
with open('wa.json') as f:
    data = json.load(f)

# Map each suburb name (lower-cased) to its bounding square:
# {"lat": [min_lat, max_lat], "lon": [min_lon, max_lon]}
out = {}
for feature in data['features']:
    name = feature["properties"]["wa_local_2"].lower()
    # First ring of the first polygon — assumes MultiPolygon-style
    # [polygon][ring][point] nesting; TODO confirm against the GeoJSON.
    # Hoisted so the ring is traversed once per axis instead of four times.
    ring = feature["geometry"]["coordinates"][0][0]
    lats = [point[1] for point in ring]
    lons = [point[0] for point in ring]
    out[name] = {
        "lat": [min(lats), max(lats)],
        "lon": [min(lons), max(lons)],
    }
@evmcheb
evmcheb / overpass.py
Created July 4, 2019 13:28
Creates an Overpass QL query to find roads within suburb boundaries
# Build an Overpass QL query: for every street scheduled today, emit
# node/way/relation name lookups limited to the Perth-area bounding box
# (-32.398,115.64,-31.6248,116.2796) and further to each passing suburb's
# min/max lat/lon square (suburb_min_max, computed elsewhere).
# NOTE(review): this snippet is truncated — the .format(...) arguments of the
# final "relation" query are not visible here.
for street in data[todays_key]:
    for passing_suburb in data[todays_key][street]:
        query += """node["name"="{}"](-32.398,115.64,-31.6248,116.2796)({}, {}, {}, {});\n""".format(street,
            suburb_min_max[passing_suburb]["lat"][0], suburb_min_max[passing_suburb]["lon"][0],
            suburb_min_max[passing_suburb]["lat"][1], suburb_min_max[passing_suburb]["lon"][1])
        query += """way["name"="{}"](-32.398,115.64,-31.6248,116.2796)({}, {}, {}, {});\n""".format(street,
            suburb_min_max[passing_suburb]["lat"][0], suburb_min_max[passing_suburb]["lon"][0],
            suburb_min_max[passing_suburb]["lat"][1], suburb_min_max[passing_suburb]["lon"][1])
        query += """relation["name"="{}"](-32.398,115.64,-31.6248,116.2796)({}, {}, {}, {});\n""".format(street,
@evmcheb
evmcheb / app.js
Last active July 6, 2019 17:18
Nodejs functions for sending static mapbox images
// Mapbox Static Images API base endpoint and (redacted) access token.
const apiEndpoint = "https://api.mapbox.com";
const accessToken="?access_token=[redacted]";
// Sends a static map image of the given postcode's area to a recipient.
// NOTE(review): the function body is truncated in this snippet — the actual
// send step is not visible here.
function sendPostcodeCameras(recipientID, postcode){
// Tiny random jitter added to the coordinates so every request URL is
// unique, preventing a cached copy of the image from being served.
const no_cache = (Math.random() * (0.0005 - 0.0001) + 0.0001)
const suburbLat = (postcodes[postcode]["lat"] + no_cache).toFixed(4)
const suburbLon = (postcodes[postcode]["lon"] + no_cache).toFixed(4)
// Static image: lon,lat center, zoom 12, no bearing/pitch, 1000x1000 retina.
const allUrl = `/styles/v1/cheb/cjxmvvcys26wx1cogeo3ybh7j/static/${suburbLon},${suburbLat},12,0,0/1000x1000@2X`;
@evmcheb
evmcheb / alljson.json
Created July 10, 2019 10:40
speed cameras to july 18
{
"8 July 2019": {
"Beechboro Road North": [
"beechboro"
],
"Berwick Street": [
"east victoria park"
],
"Brook Road": [
"kenwick"
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@evmcheb
evmcheb / simulate_rainforest.py
Created October 11, 2019 10:22
using scaper to simulate chainsaw noises in a forest
import scaper, os

# Length of each generated soundscape, in seconds.
soundscape_duration = 30.0
# Destination directory for rendered soundscapes.
outfolder = 'soundscapes/'

# Resolve the foreground/background sample banks.
foreground = os.path.expanduser("foreground")
background = os.path.expanduser("background")

# One Scaper instance drives soundscape generation.
sc = scaper.Scaper(soundscape_duration, foreground, background)

# Constant "rainforest" bed, any available source file, starting at a
# uniformly random offset (assumes source recordings run at least 300 s —
# TODO confirm).
sc.add_background(
    label=("const", "rainforest"),
    source_file=("choose", []),
    source_time=("uniform", 0, 300 - soundscape_duration),
)
@evmcheb
evmcheb / extract_spectrogram.py
Created October 11, 2019 13:34
Leverages the librosa Python library to extract a mel spectrogram
import librosa.display, os, gc
import numpy as np
import matplotlib.pyplot as plt
# Renders an audio file's log-mel spectrogram into a matplotlib figure.
# NOTE(review): the function body is truncated in this snippet — the figure
# save/cleanup steps are not visible here.
def extract_spectrogram(fname, iname):
    """Load audio from *fname* and build a log-mel-spectrogram figure.

    iname is presumably the output image path — TODO confirm (the save
    step is outside this snippet).
    """
    # kaiser_fast resampling trades some quality for load speed.
    audio, sr = librosa.load(fname, res_type='kaiser_fast')
    S = librosa.feature.melspectrogram(audio, sr=sr, n_mels=128)
    # Convert power spectrogram to dB, referenced to the peak value.
    log_S = librosa.power_to_db(S, ref=np.max)
    # 1x1-inch figure: a small, fixed-size image.
    fig = plt.figure(figsize=[1, 1])
    ax = fig.add_subplot(111)
import os

# Folder holding the training images.
images_folder = "images/"

samples = []
labels = []
# Load each image as a 100x100 array; files whose name contains "normal"
# are labeled 0, everything else 1.
for fname in os.listdir(images_folder):
    img = load_img(images_folder + fname, target_size=(100, 100))
    samples.append(img_to_array(img))
    labels.append(0 if "normal" in fname else 1)
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Conv2D, MaxPooling2D, GlobalAveragePooling2D
# Small CNN over 100x100 RGB inputs.
# NOTE(review): snippet is truncated — later layers, the classification head,
# and compile/fit steps are not visible here.
model = Sequential()
# 16 3x3 ReLU filters over the 100x100x3 input.
model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(100, 100, 3), activation='relu'))
# Halve spatial resolution.
model.add(MaxPooling2D(pool_size=2))
# Drop 20% of activations to reduce overfitting.
model.add(Dropout(0.2))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
# Grabs the best comments and splits into parts
import praw, re

# Split on runs of spaces that follow sentence punctuation. The pattern is
# loop-invariant, so compile it once here instead of once per comment.
punctuation_reg = re.compile('(?<=[.!,?:;-]) +')

r = praw.Reddit()
thread = r.submission(url=URL)
thread.comment_sort = 'best'  # get the best comments from the thread
for comment in thread.comments:
    split_parts = punctuation_reg.split(comment.body)