Skip to content

Instantly share code, notes, and snippets.

View hackintoshrao's full-sized avatar
📚
Exploring AI agents on code search and understanding

Karthic Rao hackintoshrao

📚
Exploring AI agents on code search and understanding
View GitHub Profile
@hackintoshrao
hackintoshrao / docker-compose.yml
Created March 5, 2018 08:25
Docker Compose configuration for running Confluent Kafka and exposing the services to localhost
---
# Docker Compose (v2 format) service definition for the Confluent
# ZooKeeper image, with the client port published on localhost.
# NOTE: the original paste lost its indentation; structure reconstructed
# per the Compose v2 file format.
version: '2'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:3.2.1
    ports:
      # host:container -- expose ZooKeeper's client port to localhost.
      - 32181:32181
    environment:
      # Port ZooKeeper listens on for client connections (matches `ports`).
      ZOOKEEPER_CLIENT_PORT: 32181
      # Basic time unit (ms) used for heartbeats and session timeouts.
      ZOOKEEPER_TICK_TIME: 2000
from keras import optimizers
# Fix: the original snippet used Sequential, Dense and Activation
# without importing them.
from keras.models import Sequential
from keras.layers import Dense, Activation

# Single hidden layer of 64 units on 10-feature input, followed by
# tanh and softmax activations.
model = Sequential()
model.add(Dense(64, kernel_initializer='uniform', input_shape=(10,)))
model.add(Activation('tanh'))
model.add(Activation('softmax'))
# Plain SGD with learning-rate decay and Nesterov momentum.
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
def parameter_optimize(x1, x2, y, w1=w1, w2=w2, b=b, learning_rate=learning_rate):
    """Run one pass of the perceptron trick over the training records.

    x1, x2        -- feature arrays for the 100 student records
    y             -- actual labels (0 = red, 1 = green)
    w1, w2, b     -- current line parameters (defaults read module globals
                     at definition time -- they must exist before this def)
    learning_rate -- step size applied to each correction

    Returns the updated (w1, w2, b).
    NOTE(review): the gist preview truncated the original body; the update
    step below is the standard perceptron-trick reduction -- confirm
    against the full gist.
    """
    # Iterate through each student record.
    for i in range(len(x1)):
        # Make a prediction using the current values of w1, w2, b.
        y_hat = find_perceptron_prediction(x1[i], x2[i], w1, w2, b)
        # Red point wrongly classified: actual output is 0 but the
        # prediction is 1.
        if y[i] != y_hat and y[i] == 0:
            # Reduce the parameters to move the line towards the point.
            w1 = w1 - x1[i] * learning_rate
            w2 = w2 - x2[i] * learning_rate
            b = b - learning_rate
    return w1, w2, b
@hackintoshrao
hackintoshrao / optimize.py
Created February 19, 2018 13:21
optimizing for case 1 and 2.
def parameter_optimize(x1, x2, y, w1=w1, w2=w2, b=b, learning_rate=learning_rate):
    """Apply the perceptron trick for misclassified red points.

    x1, x2        -- feature arrays for the 100 student records
    y             -- actual labels (0 = red, 1 = green)
    w1, w2, b     -- current line parameters (defaults read module globals)
    learning_rate -- step size applied to each correction

    Returns the updated (w1, w2, b).
    Fix: the original updated w1 from the module-level global
    `test_scores[i]` instead of the `x1` parameter it was given; the
    gist preview also truncated the w2/b updates.
    """
    # Iterate through each student record.
    for i in range(len(x1)):
        # Make a prediction using the current values of the parameters.
        y_hat = find_perceptron_prediction(x1[i], x2[i], w1, w2, b)
        # Red point wrongly classified: actual output is 0 but the
        # prediction is 1.
        if y[i] != y_hat and y[i] == 0:
            w1 = w1 - x1[i] * learning_rate
            w2 = w2 - x2[i] * learning_rate
            b = b - learning_rate
    return w1, w2, b
@hackintoshrao
hackintoshrao / parameter_green_update.py
Last active February 19, 2018 16:18
Parameter optimization for the case where the points with label 1 are misclassified
def parameter_optimize(x1, x2, y, w1=w1, w2=w2, b=b, learning_rate=learning_rate):
    """Apply the perceptron trick for misclassified green points.

    x1, x2        -- feature arrays for the 100 student records
    y             -- actual labels (0 = red, 1 = green)
    w1, w2, b     -- current line parameters (defaults read module globals)
    learning_rate -- step size applied to each correction

    Returns the updated (w1, w2, b).
    NOTE(review): the gist preview truncated the original body; the
    increment step below is the standard perceptron-trick update --
    confirm against the full gist.
    """
    # Iterate through each student record.
    for i in range(len(x1)):
        # Make a prediction using the current values of w1, w2, b.
        y_hat = find_perceptron_prediction(x1[i], x2[i], w1, w2, b)
        # Green point wrongly classified: actual output is 1 but the
        # prediction is 0.
        if y[i] != y_hat and y[i] == 1:
            # Slowly increase the parameters to move the line towards
            # the green cluster.
            w1 = w1 + x1[i] * learning_rate
            w2 = w2 + x2[i] * learning_rate
            b = b + learning_rate
    return w1, w2, b
def parameter_optimize(x1, x2, y, w1=w1, w2=w2, b=b, learning_rate=learning_rate):
    """Apply the perceptron trick for misclassified red points.

    x1, x2        -- feature arrays for the 100 student records
    y             -- actual labels (0 = red, 1 = green)
    w1, w2, b     -- current line parameters (defaults read module globals)
    learning_rate -- step size applied to each correction

    Returns the updated (w1, w2, b).
    NOTE(review): the gist preview truncated the original body; the
    decrement step below is the standard perceptron-trick update --
    confirm against the full gist.
    """
    # Iterate through each student record.
    for i in range(len(x1)):
        # Make a prediction using the current values of w1, w2, b.
        y_hat = find_perceptron_prediction(x1[i], x2[i], w1, w2, b)
        # Red point wrongly classified: actual output is 0 but the
        # prediction is 1.
        if y[i] != y_hat and y[i] == 0:
            # Slowly reduce the parameters to move the line towards
            # the red cluster.
            w1 = w1 - x1[i] * learning_rate
            w2 = w2 - x2[i] * learning_rate
            b = b - learning_rate
    return w1, w2, b
@hackintoshrao
hackintoshrao / and_perceptron.py
Last active January 31, 2018 07:24
The AND perceptron code with wrong parameters.
from collections import OrderedDict
import numpy as np

# Initial (deliberately imperfect) guesses for the AND-perceptron
# parameters: the two input weights and the bias term.
# Try other values and watch how the decision line moves.
w1, w2, b = 3, 2, -1
@hackintoshrao
hackintoshrao / scatter.py
Last active January 28, 2018 17:22
The scatter plot of university admission data.
# NOTE: IPython/Jupyter magic -- renders plots inline in a notebook;
# this line is not valid outside IPython.
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Extract the data frame columns into numpy arrays.
# NOTE(review): `data` is presumably a pandas DataFrame loaded in an
# earlier notebook cell -- confirm before running this snippet alone.
test_scores = data["test_scores"].values
grades = data["grades"].values
label = data["accepted"].values
@hackintoshrao
hackintoshrao / process.py
Created January 27, 2018 04:19
Adding a simple rule engine to send the appropriate response
def processRequest(req):
    """Handle an API.AI (Dialogflow) webhook fulfillment POST.

    Parses the JSON request body and extracts the intent name and the
    entity key/value parameters so a response can be built.

    NOTE(review): the `req` parameter is ignored -- the body parses the
    Flask global `request.data` instead; confirm which the caller
    intends. The gist preview truncated the rest of this function, and
    its original indentation was lost in the scrape (reconstructed here).
    """
    # Parse the POST request body into a dictionary for easy access.
    req_dict = json.loads(request.data)
    entity_type = ""
    entity_value = ""
    speech = ""
    # Access the fields of the POST request body of the API.AI
    # invocation of the webhook.
    intent = req_dict["result"]["metadata"]["intentName"]
    entity_key_val = req_dict["result"]["parameters"]
@hackintoshrao
hackintoshrao / app.py
Created January 27, 2018 03:39
Parsing the entity type, entity value and intent from the api.ai webhook request
# -*- coding:utf8 -*-
# !/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#