Satsawat Natakarnkitkul (Net) netsatsawat

@netsatsawat
netsatsawat / sgd_sklearn.py
Created August 5, 2020 10:10
Snippet of SGD regressor implementation using sklearn
from sklearn.linear_model import SGDRegressor
SGD_rgs_normal = SGDRegressor(fit_intercept=True, random_state=SEED, eta0=learning_rate,
                              learning_rate='constant', max_iter=n_epochs)
SGD_rgs_normal.fit(X, y)
print(SGD_rgs_normal)
print(f'Intercept: {SGD_rgs_normal.intercept_}, weights: {SGD_rgs_normal.coef_}')
y_pred = SGD_rgs_normal.predict(X)
_ = print_regress_metric(y, y_pred)
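The helper print_regress_metric is not defined in this preview. A minimal sketch of what it might look like, assuming it reports standard regression metrics via sklearn.metrics (the implementation and return value are assumptions, only the name comes from the gist):
from sklearn.metrics import mean_squared_error, r2_score

def print_regress_metric(y_true, y_pred):
    # hypothetical helper: print common regression metrics and return them
    mse = mean_squared_error(y_true, y_pred)
    r2 = r2_score(y_true, y_pred)
    print(f'MSE: {mse:.4f}, RMSE: {mse ** 0.5:.4f}, R2: {r2:.4f}')
    return mse, r2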
@netsatsawat
netsatsawat / stochastic_gd_regression.py
Created August 5, 2020 09:44
Snippet of stochastic gradient descent (mini-batch iterator)
import random
import numpy as np

def _iter(X, y, batch_size: int = 1):
    """Yield shuffled mini-batches of (X, y) for stochastic gradient descent."""
    n_observations = X.shape[0]
    idx = list(range(n_observations))
    random.shuffle(idx)
    for batch_id, i in enumerate(range(0, n_observations, batch_size)):
        _pos = np.array(idx[i: min(i + batch_size, n_observations)])
        yield batch_id, X.take(_pos, axis=0), y.take(_pos)
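A sketch of how this generator could drive a manual SGD loop; X, y, learning_rate, and n_epochs are carried over from the surrounding gists, and the least-squares gradient step is an assumption, not taken from the gist:
theta = np.ones(X.shape[1])                          # initialize weights
for epoch in range(n_epochs):
    for batch_id, X_batch, y_batch in _iter(X, y, batch_size=1):
        error = X_batch.dot(theta) - y_batch         # residuals for this mini-batch
        grad = X_batch.T.dot(error) / X_batch.shape[0]
        theta -= learning_rate * grad                # stochastic gradient step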
@netsatsawat
netsatsawat / batch_gd.py
Last active August 5, 2020 09:21
Code snippet of batch gradient descent
def batch_gradient_descent(learning_rate, X, y, epochs: int,
                           return_model_result: bool = True):
    # initial outputs
    mse_ = []
    cost_ = []
    theta_ = []
    n = X.shape[0]
    theta = np.ones(X.shape[1])  # set default weights
    X_transpose = X.T
    for i in range(0, epochs):
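        # The preview cuts off here. A minimal sketch of how the loop body
        # might continue, assuming a standard least-squares batch update
        # (an assumption, not the author's exact code):
        y_hat = np.dot(X, theta)                    # predictions with current weights
        error = y_hat - y                           # residuals
        gradient = np.dot(X_transpose, error) / n   # gradient of the MSE cost
        theta = theta - learning_rate * gradient    # gradient descent step
        mse_.append(np.mean(error ** 2))
        cost_.append(np.sum(error ** 2) / (2 * n))
        theta_.append(theta.copy())
    if return_model_result:
        return theta, mse_, cost_, theta_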
@netsatsawat
netsatsawat / single_linear_regression.py
Last active April 4, 2022 10:07
Example of single linear regression (closed-form equation)
import numpy as np
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
# generate sample data (single linear)
X = 2 * np.random.rand(200, 1)
y = 1.2 * X + 1 + 0.8 * np.random.randn(200, 1)
X_ = sm.add_constant(X) # add constant for intercept computation
print('Method 1: matrix formulation')
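The preview ends at "Method 1: matrix formulation". A sketch of the closed-form normal equation solution the title refers to, cross-checked against sklearn (variable names beyond the snippet are assumptions):
theta_hat = np.linalg.inv(X_.T.dot(X_)).dot(X_.T).dot(y)   # (X'X)^-1 X'y
print(f'Intercept: {theta_hat[0]}, slope: {theta_hat[1]}')

lr = LinearRegression().fit(X, y)                          # cross-check with sklearn
print(f'Intercept: {lr.intercept_}, slope: {lr.coef_}')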
@netsatsawat
netsatsawat / nlms_stock_production.py
Created July 21, 2020 11:53
Partial example of using an adaptive filter for real-time model prediction
s_future = ts['Close'].values.flatten()[1000:]
filter_min_error = pa_list[np.argmin(error_list)]
print(f'Selected the filter with mu of {filter_min_error.mu}')
print(f'with avg error of {error_list[np.argmin(error_list)]}')
x_future = pa.input_from_history(s_future, n)
d_future = np.zeros(len(x_future))
N_future = len(x_future)
for i, k in enumerate(range((n - 1), N_future)):
    d_future[i] = s_future[k + 1]   # target: next closing price
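A sketch of how the selected filter might then run in real time, assuming padasip's predict/adapt interface: forecast one step ahead, then adapt the weights once the true value arrives (loop details are assumptions):
y_pred = np.zeros(N_future)
for i in range(N_future - 1):
    y_pred[i] = filter_min_error.predict(x_future[i])   # one-step-ahead forecast
    filter_min_error.adapt(d_future[i], x_future[i])    # update weights with realized value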
@netsatsawat
netsatsawat / nlms_stock_data.py
Created July 21, 2020 11:43
Partial example of an NLMS filter on sample stock data
import pandas as pd
import numpy as np
import padasip as pa
SEED = 121
np.random.seed(SEED)
n = 5
s = ts['Close'].values.flatten()[: 1000] # initial timeseries data
x = pa.input_from_history(s, n) # input matrix
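The preview stops before the target vector and the filter search. A sketch of how pa_list and error_list (used in the production gist above) might be built, assuming a next-price target and a grid of step sizes mu for padasip's FilterNLMS:
d = np.zeros(len(x))
for i, k in enumerate(range((n - 1), len(x))):
    d[i] = s[k + 1]                       # target: next closing price

pa_list, error_list = [], []
for mu in [0.1, 0.5, 0.9, 1.0]:           # candidate step sizes (assumed grid)
    f = pa.filters.FilterNLMS(n=n, mu=mu, w='random')
    y_hat, e, w = f.run(d, x)
    pa_list.append(f)
    error_list.append(np.mean(np.abs(e)))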
@netsatsawat
netsatsawat / adaptive_filter_mock_data_example.py
Last active March 30, 2021 22:14
Basic example using mock data
import numpy as np
import padasip as pa
import matplotlib.pylab as plt
# prep data
N = 200 # the overall time series size
n = 5 # size of sample we want to feed into the filter
s = np.random.random(N) # generate the source input
d = np.zeros(N) # initialize the target array
for k in range((n-1), N):
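    # The preview cuts off here. A minimal sketch completing the mock-data
    # example; the target definition (mean of the last n samples) and the
    # NLMS settings are assumptions:
    d[k] = np.mean(s[k - n + 1: k + 1])

x = pa.input_from_history(s, n)             # input matrix, shape (N - n + 1, n)
f = pa.filters.FilterNLMS(n=n, mu=0.5, w='random')
y, e, w = f.run(d[(n - 1):], x)             # align targets with input rows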
Example of generating a sine wave and inspecting its frequency spectrum with FFT
import numpy as np
import matplotlib.pyplot as plt
start_tm, end_tm = 0, 2
signal1 = 12                     # frequency of the wave
smpl_freq = 32 * signal1         # sampling frequency with oversampling factor=32
smpl_intv = 1 / smpl_freq        # interval between sampled time points
tm = np.arange(start_tm, end_tm, smpl_intv)
ampl1 = np.sin(2 * np.pi * signal1 * tm)   # generate sine wave
fig, axes = plt.subplots(2, 1, figsize=(14, 6))
plt.subplots_adjust(hspace=.5)
axes[0].set_title(f'Wave with a frequency of {signal1} Hz')
axes[0].plot(tm, ampl1)
axes[0].set_xlabel('Time')
axes[0].set_ylabel('Amplitude')
ft_ = np.fft.fft(ampl1) / len(ampl1)    # normalize amplitude and apply the FFT
ft_ = ft_[range(int(len(ampl1) / 2))]   # keep the one-sided spectrum (below Nyquist)
tp_cnt = len(ampl1)
val_ = np.arange(int(tp_cnt / 2))
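A sketch of how the second panel might plot the one-sided spectrum, deriving the frequency axis from the sample count and sampling frequency (the plotting details are assumptions):
tm_period = tp_cnt / smpl_freq          # total signal duration in seconds
freq_ = val_ / tm_period                # frequency axis in Hz
axes[1].set_title('Fourier transform of the wave')
axes[1].plot(freq_, abs(ft_))
axes[1].set_xlabel('Frequency (Hz)')
axes[1].set_ylabel('Amplitude')
plt.show()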
@netsatsawat
netsatsawat / dataForCluster.py
Created October 13, 2019 19:08
Generate data blobs for a clustering tutorial
from sklearn.datasets import make_blobs
import numpy as np
SEED = 123
N_SAMPLES = 10000
N_FEATURES = 2
N_CENTERS = 5
np.random.seed(SEED)
X, y = make_blobs(n_samples=N_SAMPLES, n_features=N_FEATURES, centers=N_CENTERS,
                  random_state=SEED)   # preview truncated here; closing argument assumed
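Since the description mentions a clustering tutorial, a short sketch fitting KMeans to the generated blobs (the model and its hyperparameters are assumptions, not part of the gist):
from sklearn.cluster import KMeans

km = KMeans(n_clusters=N_CENTERS, random_state=SEED)
labels = km.fit_predict(X)   # cluster assignment for each generated sample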