Vladimir Ilievski (IlievskiV)
IlievskiV / julia_set.py
Created September 6, 2020 10:08
Implementation of the Julia Set convergence
def julia_quadratic(zx, zy, cx, cy, threshold):
    """Calculates whether the number z[0] = zx + i*zy with a constant c = cx + i*cy
    belongs to the Julia set. In order to belong, the sequence
    z[i + 1] = z[i]**2 + c must not diverge after 'threshold' number of steps.
    The sequence diverges if the absolute value of z[i+1] is greater than 4.

    :param float zx: the x component of z[0]
    :param float zy: the y component of z[0]
    :param float cx: the x component of the constant c
    :param float cy: the y component of the constant c
    :param int threshold: the number of iterations to check for divergence
    :return int: the number of iterations completed before divergence
    """
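    # The gist preview cuts off after the docstring; a minimal body consistent
    # with the description above (iterate z -> z**2 + c and count the steps):
    z = complex(zx, zy)
    c = complex(cx, cy)
    for i in range(threshold):
        z = z**2 + c
        if abs(z) > 4.0:  # diverged
            return i
    return threshold - 1  # stayed bounded for all 'threshold' steps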
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

x_start, y_start = -2, -2  # an interesting region starts here
width, height = 4, 4  # for 4 units up and right
density_per_unit = 200  # how many pixels per unit

# real and imaginary axes
re = np.linspace(x_start, x_start + width, width * density_per_unit)
im = np.linspace(y_start, y_start + height, height * density_per_unit)
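The preview ends here; a minimal sketch of how one frame could be rendered with julia_quadratic over this grid (the choice of the constant c, the iteration budget, and the plotting details are assumptions):

threshold = 20  # assumed iteration budget
cx, cy = -0.744, 0.148  # an arbitrary choice of the constant c
X = np.empty((len(re), len(im)))  # convergence counts per pixel
for i in range(len(re)):
    for j in range(len(im)):
        X[i, j] = julia_quadratic(re[i], im[j], cx, cy, threshold)

plt.imshow(X.T, cmap='magma', interpolation='bilinear')
plt.axis('off')
plt.show()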
IlievskiV / riemann_stieltjes_integration.py
Created October 2, 2020 10:48
Implementation of the Riemann-Stieltjes Integration in Python
def derivative(f, a, h=0.01):
    '''Approximates the derivative of the function f at the point a
    :param function f: function to differentiate
    :param float a: the point of differentiation
    :param float h: step size
    :return float: the derivative of f at a
    '''
    return (f(a + h) - f(a - h)) / (2 * h)
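The gist's title promises the Riemann-Stieltjes integral itself, but the preview stops at the helper. A minimal sketch of how the integral of f dg on [a, b] might be approximated with it, treating dg as g'(x) dx (the function name and the midpoint rule are assumptions):

import numpy as np

def riemann_stieltjes_integral(f, g, a, b, n=1000):
    # approximate the integral of f dg as the sum of f(x) * g'(x) * dx,
    # evaluating at interval midpoints and using derivative() above for g'
    xs = np.linspace(a, b, n + 1)
    dx = (b - a) / n
    mids = (xs[:-1] + xs[1:]) / 2
    return sum(f(x) * derivative(g, x) * dx for x in mids)

# example: integrating f(x) = x against g(x) = x**2 on [0, 1] gives ~2/3
print(riemann_stieltjes_integral(lambda x: x, lambda x: x**2, 0, 1))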
IlievskiV / pretrained_bert.py
Created January 15, 2021 09:33
How to use a pre-trained BERT
from transformers import BertTokenizer, BertModel
import torch

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')

# the tokenizer adds the special [CLS] and [SEP] tokens by itself,
# so the raw text should not include them
inputs = tokenizer("This is very awesome!", return_tensors="pt")
outputs = model(**inputs)

# the learned representation for the [CLS] token
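# (the preview cuts off here; the extraction presumably looks like this)
cls_representation = outputs.last_hidden_state[:, 0, :]  # first position holds [CLS]; shape (1, 768)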
import numpy as np

powers_of_two = np.array([[4], [2], [1]])  # shape (3, 1)

def step(x, rule_binary):
    """Makes one step in the cellular automaton.

    Args:
        x (np.array): current state of the automaton
        rule_binary (np.array): the update rule

    Returns:
        np.array: updated state of the automaton
    """
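    # The preview cuts off after the docstring; a minimal body consistent with
    # powers_of_two above: read each cell's 3-neighborhood as a binary number
    # and look the result up in the rule (the exact layout is an assumption).
    y = np.vstack((np.roll(x, 1), x, np.roll(x, -1))).astype(np.int8)
    z = np.sum(powers_of_two * y, axis=0)  # neighborhood index, 0..7
    return rule_binary[7 - z]  # rule_binary ordered from '111' down to '000'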
import numpy as np

def cellular_automaton(rule_number, size, steps,
                       init_cond='random', impulse_pos='center'):
    """Generate the state of an elementary cellular automaton after a pre-determined
    number of steps starting from some random state.

    Args:
        rule_number (int): the number of the update rule to use
        size (int): number of cells in the row
        steps (int): number of steps to evolve the automaton
        init_cond (str): either 'random' or 'impulse'
        impulse_pos (str): if 'impulse', where to place it: 'left', 'center' or 'right'

    Returns:
        np.array: the evolved states of the automaton, one row per step
    """
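    # The preview cuts off after the docstring; a sketch of a plausible body,
    # reusing step() from the previous snippet (initialization details assumed):
    rule_binary = np.array([int(b) for b in np.binary_repr(rule_number, width=8)], dtype=np.int8)
    x = np.zeros((steps, size), dtype=np.int8)
    if init_cond == 'random':
        x[0, :] = np.array(np.random.rand(size) < 0.5, dtype=np.int8)
    else:
        x[0, {'left': 0, 'center': size // 2, 'right': size - 1}[impulse_pos]] = 1
    for i in range(steps - 1):
        x[i + 1, :] = step(x[i, :], rule_binary)
    return x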
from keras.datasets import imdb
from keras.preprocessing import sequence

max_features = 20000  # vocabulary size
maxlen = 100  # max length of every input sequence

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train, y_train = x_train[:2500], y_train[:2500]
x_test, y_test = x_test[:1000], y_test[:1000]
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
from keras.models import Sequential
from keras.layers import Activation, Bidirectional, Conv1D, Dense
from keras.layers import Dropout, Embedding, LSTM, MaxPooling1D

def make_model(
    embedding_dim: int,
    dropout: float,
    filters: int,
    kernel_size: int,
    pool_size: int,
):
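    # The preview cuts off mid-signature (pool_size above is inferred from the
    # hyperparameter grid further down); a plausible body given the imports:
    # a CNN + bidirectional-LSTM binary classifier (layer sizes are assumptions)
    model = Sequential()
    model.add(Embedding(max_features, embedding_dim, input_length=maxlen))
    model.add(Dropout(dropout))
    model.add(Conv1D(filters, kernel_size, padding='valid', activation='relu'))
    model.add(MaxPooling1D(pool_size=pool_size))
    model.add(Bidirectional(LSTM(64)))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model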
from keras.metrics import BinaryAccuracy, Precision, Recall

METRICS = [
    BinaryAccuracy(name='accuracy'),
    Precision(name='precision'),
    Recall(name='recall'),
]  # metrics to track
# hyperparameters to track
embedding_size = [32, 128]
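The preview truncates the remaining candidate lists, but the names are referenced by the grid snippet that follows, so placeholders are sketched in (all values below are assumptions):

dropout = [0.1, 0.5]  # placeholder candidates
filters = [32, 64]  # placeholder candidates
kernel_size = [3, 5]  # placeholder candidates
pool_size = [2, 4]  # placeholder candidates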
import itertools

epochs = 3  # number of training epochs
test_batch_size = 32  # batch size for testing

# one list of candidate values per hyperparameter
arrays = [
    embedding_size,
    dropout,
    filters,
    kernel_size,
    pool_size,
]
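The snippet ends mid-list; given the itertools import, it presumably enumerates every hyperparameter combination. A hedged sketch of that grid search, reusing make_model and METRICS from above (the compile and fit settings are assumptions):

for embedding_dim, dr, fl, ks, ps in itertools.product(*arrays):
    model = make_model(embedding_dim, dr, fl, ks, ps)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=METRICS)
    model.fit(x_train, y_train, epochs=epochs)
    model.evaluate(x_test, y_test, batch_size=test_batch_size)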