import json

# Load the WA suburb boundary GeoJSON and record each suburb's bounding
# box (min/max latitude and longitude), keyed by lower-cased suburb name.
with open('wa.json') as f:
    data = json.load(f)

out = {}
for feature in data['features']:
    suburb = feature["properties"]["wa_local_2"].lower()
    coords = feature["geometry"]["coordinates"][0][0]
    out[suburb] = {
        "lat": [min(c[1] for c in coords), max(c[1] for c in coords)],
        "lon": [min(c[0] for c in coords), max(c[0] for c in coords)],
    }
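The next snippet looks these bounds up as `suburb_min_max`; a minimal sketch of persisting them between scripts (the filename is an assumption, not from the original gist):

# Assumed filename: the query-building snippet below would load this
# dictionary back in as suburb_min_max.
with open('suburb_min_max.json', 'w') as f:
    json.dump(out, f, indent=2)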
# Build the Overpass QL body: for every street with a camera today, add
# node, way and relation name filters restricted to both the Perth metro
# bounding box and the passing suburb's bounds (south, west, north, east).
for street in data[todays_key]:
    for passing_suburb in data[todays_key][street]:
        bounds = suburb_min_max[passing_suburb]
        for element in ("node", "way", "relation"):
            query += """{}["name"="{}"](-32.398,115.64,-31.6248,116.2796)({}, {}, {}, {});\n""".format(
                element, street,
                bounds["lat"][0], bounds["lon"][0],
                bounds["lat"][1], bounds["lon"][1])
const apiEndpoint = "https://api.mapbox.com";
const accessToken = "?access_token=[redacted]";

function sendPostcodeCameras(recipientID, postcode) {
    // Add a tiny random offset to the coordinates so each request produces
    // a unique URL and is not served from cache.
    const no_cache = Math.random() * (0.0005 - 0.0001) + 0.0001;
    const suburbLat = (postcodes[postcode]["lat"] + no_cache).toFixed(4);
    const suburbLon = (postcodes[postcode]["lon"] + no_cache).toFixed(4);
    // Static Images API path: {lon},{lat},{zoom},{bearing},{pitch}/{width}x{height}@2x
    const allUrl = `/styles/v1/cheb/cjxmvvcys26wx1cogeo3ybh7j/static/${suburbLon},${suburbLat},12,0,0/1000x1000@2x`;
{
    "8 July 2019": {
        "Beechboro Road North": [
            "beechboro"
        ],
        "Berwick Street": [
            "east victoria park"
        ],
        "Brook Road": [
            "kenwick"
import scaper, os

# Synthesise 30-second training soundscapes with scaper: a rainforest
# background chosen at random, starting at a random offset within the file.
soundscape_duration = 30.0
outfolder = 'soundscapes/'
foreground = os.path.expanduser("foreground")
background = os.path.expanduser("background")

sc = scaper.Scaper(soundscape_duration, foreground, background)
sc.add_background(label=("const", "rainforest"),
                  source_file=("choose", []),
                  source_time=("uniform", 0, 300 - soundscape_duration))
import librosa.display, os, gc
import numpy as np
import matplotlib.pyplot as plt

def extract_spectrogram(fname, iname):
    # Load the audio clip and compute a 128-band mel spectrogram in dB.
    audio, sr = librosa.load(fname, res_type='kaiser_fast')
    S = librosa.feature.melspectrogram(y=audio, sr=sr, n_mels=128)
    log_S = librosa.power_to_db(S, ref=np.max)
    # Render onto a small 1x1 inch figure so the saved image stays compact.
    fig = plt.figure(figsize=[1, 1])
    ax = fig.add_subplot(111)
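    # The embed cuts the function off here; what follows is an assumed
    # completion, not the original code: hide the axes, draw the log-mel
    # spectrogram, save it to the image path and free the figure.
    ax.axis('off')
    librosa.display.specshow(log_S, sr=sr)
    fig.savefig(iname, dpi=100, bbox_inches='tight', pad_inches=0)
    plt.close(fig)
    gc.collect()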
import os
from keras.preprocessing.image import load_img, img_to_array

# Load every image, resize it to 100x100, and label it 0 for "normal"
# files and 1 for everything else.
samples = []
labels = []
images_folder = "images/"
for image in os.listdir(images_folder):
    samples.append(img_to_array(load_img(images_folder + image, target_size=(100, 100))))
    if "normal" in image:
        labels.append(0)
    else:
        labels.append(1)
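Before these lists can be fed to the Keras model below they need to be numpy arrays, typically scaled and split; a minimal sketch under that assumption (none of this appears in the original gist):

import numpy as np
from sklearn.model_selection import train_test_split

# Assumed preprocessing: stack into arrays, scale pixels to [0, 1], and
# hold out a validation split.
X = np.array(samples, dtype="float32") / 255.0
y = np.array(labels)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)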
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Conv2D, MaxPooling2D, GlobalAveragePooling2D

# Small CNN over the 100x100 RGB images loaded above.
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(100, 100, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
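The embed stops partway through the architecture; a hedged sketch of one way it might be finished and trained, using the GlobalAveragePooling2D layer the imports suggest and the arrays from the sketch above (layer sizes, the binary output and the training settings are assumptions):

# Assumed remainder of the architecture and training loop; not the
# original gist's code.
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
model.add(Dense(1, activation='sigmoid'))  # binary: normal vs not

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=20, batch_size=32)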
# Grabs the best comments and splits them into parts
import praw, re

r = praw.Reddit()
thread = r.submission(url=URL)
thread.comment_sort = 'best'  # get the best comments from the thread

for comment in thread.comments:
    # Split each comment into parts after punctuation followed by whitespace.
    punctuation_reg = re.compile('(?<=[.!,?:;-]) +')
    split_parts = punctuation_reg.split(comment.body)