Nikola Živković (NMZivkovic)

public BikeSharingDemandPrediction Predict(BikeSharingDemandSample sample)
{
    return _predictionEngine.Predict(sample);
}
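The return type BikeSharingDemandPrediction is referenced above but its definition is not among these previews. A minimal sketch of what the output class for an ML.NET regression model usually looks like; the property name PredictedCount is an assumption, only the class name comes from the snippet:

using Microsoft.ML.Data;

namespace BikeSharingDemand.BikeSharingDemandData
{
    public class BikeSharingDemandPrediction
    {
        // ML.NET regression trainers write their prediction to the "Score" column.
        [ColumnName("Score")]
        public float PredictedCount;
    }
}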
public RegressionMetrics Evaluate(string testDataLocation)
{
    var testDataView = _mlContext.Data.LoadFromTextFile<BikeSharingDemandSample>(
        path: testDataLocation,
        hasHeader: true,
        separatorChar: ',');
    // The preview cuts off here; completing with the usual ML.NET pattern,
    // assuming the fitted model is kept in a _trainedModel field.
    return _mlContext.Regression.Evaluate(_trainedModel.Transform(testDataView));
}
public Model(MLContext mlContext, IEstimator<ITransformer> algorithm, string trainingDataLocation)
{
    _mlContext = mlContext;
    _algorithm = algorithm;
    _trainingDataView = _mlContext.Data.LoadFromTextFile<BikeSharingDemandSample>(
        path: trainingDataLocation,
        hasHeader: true,
        separatorChar: ',');
    Name = algorithm.GetType().ToString().Split('.').Last();
}
public void BuildAndFit()
{
    var pipeline = _mlContext.Transforms.CopyColumns(inputColumnName: "Count", outputColumnName: "Label")
        .Append(_mlContext.Transforms.Categorical.OneHotEncoding("Season"))
        .Append(_mlContext.Transforms.Categorical.OneHotEncoding("Year"))
        .Append(_mlContext.Transforms.Categorical.OneHotEncoding("Holiday"))
        .Append(_mlContext.Transforms.Categorical.OneHotEncoding("Weather"))
        .Append(_mlContext.Transforms.Concatenate("Features",
            "Season",
            "Year",
            // ... the remaining feature columns are cut off in the gist preview;
            // a sketch of how the pipeline is usually finished follows below
using BikeSharingDemand.BikeSharingDemandData;
using Microsoft.ML;
using Microsoft.ML.Data;
using System.Linq;

namespace BikeSharingDemand.ModelNamespace
{
    public sealed class Model
    {
        private readonly MLContext _mlContext;
        // ... the rest of the class is cut off in the gist preview
using Microsoft.ML.Data;

namespace BikeSharingDemand.BikeSharingDemandData
{
    public class BikeSharingDemandSample
    {
        [LoadColumn(2)] public float Season;
        [LoadColumn(3)] public float Year;
        [LoadColumn(4)] public float Month;
        [LoadColumn(5)] public float Hour;
        // ... the remaining columns (including the Count label) are cut off in the gist preview
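Taken together, the snippets above form a small wrapper around an ML.NET regression pipeline. A minimal usage sketch, assuming an SDCA regression trainer and hypothetical CSV paths; neither appears in the previews:

var mlContext = new MLContext();

// Hypothetical data paths; any trainer usable as IEstimator<ITransformer> would do here.
var model = new Model(mlContext, mlContext.Regression.Trainers.Sdca(), "Data/hour_train.csv");
model.BuildAndFit();

var metrics = model.Evaluate("Data/hour_test.csv");
var prediction = model.Predict(new BikeSharingDemandSample { Season = 3, Year = 1, Month = 8, Hour = 10 });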
for e in range(0, num_of_episodes):
    # Reset the environment
    state = environment.reset()
    state = np.reshape(state, [1, 1])

    # Initialize variables
    reward = 0
    terminated = False

    bar = progressbar.ProgressBar(maxval=timesteps_per_episode/10,
                                  widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    # ... the per-timestep loop is cut off in the preview; a sketch follows below
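The episode loop is cut off before the agent interacts with the environment. A minimal sketch of the usual inner loop, assuming an old-style Gym step API and an agent.store method that is not shown in these previews:

    for timestep in range(timesteps_per_episode):
        # Epsilon-greedy action selection
        action = agent.act(state)

        # Take the action and observe the outcome (4-tuple Gym API assumed)
        next_state, reward, terminated, info = environment.step(action)
        next_state = np.reshape(next_state, [1, 1])

        # Remember the transition (store is an assumed method, sketched further below)
        agent.store(state, action, reward, next_state, terminated)
        state = next_state

        if terminated:
            break

        # Learn from a random minibatch once the replay buffer is large enough
        if len(agent.experience_replay) > batch_size:
            agent.retrain(batch_size)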
from tensorflow.keras.optimizers import Adam

optimizer = Adam(learning_rate=0.01)
agent = Agent(environment, optimizer)

batch_size = 32
num_of_episodes = 100
timesteps_per_episode = 1000

agent.q_network.summary()
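The Agent constructor itself is not among these previews. A minimal sketch of the state the methods below rely on, assuming a discrete Gym-style environment such as Taxi; the replay-buffer size, the gamma and epsilon values, and the target_network field are assumptions:

import random
from collections import deque

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, Reshape

class Agent:
    def __init__(self, environment, optimizer):
        # Sizes of the discrete state and action spaces
        self._state_size = environment.observation_space.n
        self._action_size = environment.action_space.n
        self._optimizer = optimizer

        # Replay buffer plus exploration and discount parameters (values assumed)
        self.experience_replay = deque(maxlen=2000)
        self.gamma = 0.6
        self.epsilon = 0.1

        # Online Q-network and a separate target network (standard DQN setup)
        self.q_network = self._build_compile_model()
        self.target_network = self._build_compile_model()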
def retrain(self, batch_size):
    minibatch = random.sample(self.experience_replay, batch_size)

    for state, action, reward, next_state, terminated in minibatch:
        target = self.q_network.predict(state)

        if terminated:
            target[0][action] = reward
        else:
            # Truncated in the preview; completing with the standard DQN update,
            # which bootstraps from a target network (assumed field, see the sketch above)
            t = self.target_network.predict(next_state)
            target[0][action] = reward + self.gamma * np.amax(t)

        self.q_network.fit(state, target, epochs=1, verbose=0)
def act(self, state):
    # Explore with probability epsilon
    if np.random.rand() <= self.epsilon:
        return environment.action_space.sample()  # relies on the module-level environment

    # Otherwise act greedily with respect to the Q-network
    q_values = self.q_network.predict(state)
    return np.argmax(q_values[0])
def _build_compile_model(self):
    model = Sequential()
    # Embed the discrete state index into a 10-dimensional vector
    model.add(Embedding(self._state_size, 10, input_length=1))
    model.add(Reshape((10,)))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(50, activation='relu'))
    # One linear output per action: the estimated Q-values
    model.add(Dense(self._action_size, activation='linear'))
    model.compile(loss='mse', optimizer=self._optimizer)
    return model
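Two Agent methods used during training do not appear in these previews: remembering transitions and syncing the target network. A minimal sketch of both, under the same assumptions as above; the names store and align_target_model are assumptions:

def store(self, state, action, reward, next_state, terminated):
    # Append one transition to the replay buffer
    self.experience_replay.append((state, action, reward, next_state, terminated))

def align_target_model(self):
    # Copy the online network's weights into the target network
    self.target_network.set_weights(self.q_network.get_weights())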