Phil Stubbings (phil8192)
import numpy as np
from sklearn import linear_model

def train_model(intercept_init, coef_init, X, y, epochs, lr, batch_size=None, randomise=True):
    # A batch_size of None (or <= 0) means full-batch training.
    if batch_size is None or batch_size <= 0:
        batch_size = X.shape[0]
    classes = np.unique(y)
    # 'log' is the logistic-regression loss (renamed 'log_loss' in newer sklearn).
    model = linear_model.SGDClassifier(loss='log', learning_rate='constant', eta0=lr, verbose=0)
    set_weights(intercept_init, coef_init, classes, model)
    batch_train(model, X, y, classes, epochs, batch_size, randomise)
    return model
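set_weights and batch_train are not shown in this preview. Minimal sketches of what they might look like, assuming SGDClassifier keeps pre-set coef_/intercept_ across partial_fit calls (treat these as approximations, not the originals):

def set_weights(intercept_init, coef_init, classes, model):
    # Prime the model with one dummy partial_fit call so sklearn allocates
    # its internal state, then overwrite the weights with the global ones.
    # (Sketch; the original helper is not shown in the preview.)
    model.partial_fit(np.zeros((1, coef_init.shape[1])), [classes[0]], classes=classes)
    model.coef_ = coef_init.copy()
    model.intercept_ = intercept_init.copy()

def batch_train(model, X, y, classes, epochs, batch_size, randomise):
    # Plain mini-batch SGD over the local partition (sketch).
    n = X.shape[0]
    for _ in range(epochs):
        idx = np.random.permutation(n) if randomise else np.arange(n)
        for start in range(0, n, batch_size):
            batch = idx[start:start + batch_size]
            model.partial_fit(X[batch], y[batch], classes=classes)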
class Runner:
    def __init__(self, X, y):
        self.X = X
        self.y = y

    # Called by the FedAvg algorithm with the current global weights.
    def optimise(self, intercept_init, coef_init, hyperparameters):
        _intercept_init = intercept_init.copy()
        _coef_init = coef_init.copy()
        model = train_model(_intercept_init, _coef_init, self.X, self.y, **hyperparameters)
        # Return the locally updated weights for aggregation (the preview is
        # truncated here; returning intercept/coef is the natural contract).
        return model.intercept_, model.coef_
from sklearn.base import BaseEstimator, ClassifierMixin

class FedAvg(BaseEstimator, ClassifierMixin):
    def __init__(self,
                 n_runners=1,
                 sample_size=1,
                 rounds=1,
                 combine='weighted',
                 partition_params={
                     'scheme': 'uniform'
                 }):
        # (The preview truncates the signature here; any further
        # constructor arguments are omitted.)
        self.n_runners = n_runners
        self.sample_size = sample_size
        self.rounds = rounds
        self.combine = combine
        self.partition_params = partition_params
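The preview stops before the aggregation step. With combine='weighted', FedAvg averages each runner's weights in proportion to its local sample count; a minimal sketch of that combine (weighted_average is a hypothetical helper, not shown in the gist):

import numpy as np

def weighted_average(weight_list, sample_counts):
    # FedAvg combine: runner i contributes in proportion to n_i / sum(n).
    total = float(sum(sample_counts))
    return sum(w * (n / total) for w, n in zip(weight_list, sample_counts))

A round would collect the (intercept, coef) pairs returned by each Runner.optimise call and apply this to both arrays; a 'uniform' combine is the special case of equal sample counts.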
import numpy as np
import phe as paillier

def encrypt(pub_key, x):
    """Encrypt a vector, element-wise, with pub_key."""
    return np.array([pub_key.encrypt(v) for v in x.tolist()])

def decrypt(pri_key, x):
    """Decrypt a vector, element-wise, with pri_key."""
    return np.array([pri_key.decrypt(v) for v in x])
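Paillier is additively homomorphic: ciphertexts can be added together, and a ciphertext can be multiplied by a plaintext scalar, all without decrypting. That is the entire toolbox the encrypted gradient code below relies on. A quick round trip (key size and values are illustrative):

pub_key, pri_key = paillier.generate_paillier_keypair(n_length=1024)
u = encrypt(pub_key, np.array([0.5, -1.0]))
v = encrypt(pub_key, np.array([1.5, 2.0]))
print(decrypt(pri_key, u + v))     # ciphertext + ciphertext -> [2.0, 1.0]
print(decrypt(pri_key, u * 0.25))  # ciphertext * plaintext  -> [0.125, -0.25]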
def taylor_loss(theta, x, y):
    """Second-order Taylor approximation of the logistic loss around wx = 0:
    log(1 + exp(-y*wx)) ~= log(2) - 0.5*y*wx + 0.125*wx^2."""
    wx = np.dot(x, theta)
    return 1 / x.shape[0] * np.sum(np.log(2) - 0.5 * y * wx + 0.125 * wx ** 2)

def taylor_gradient(theta, x, y):
    """Gradient of the approximated loss. It is polynomial in theta, so it can
    be evaluated with only additions and plaintext multiplications."""
    return 1 / x.shape[0] * np.dot(0.25 * np.dot(x, theta) - 0.5 * y, x)
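The exact logistic loss involves log(1 + e^z), which cannot be evaluated under Paillier, while the quadratic expansion above needs only homomorphic additions and plaintext scalings. The approximation is close as long as w'x stays small; a quick check with illustrative values:

theta = np.array([0.1, -0.2])
x = np.array([[1.0, 2.0], [0.5, -1.0]])
y = np.array([1.0, -1.0])
exact = np.mean(np.log(1 + np.exp(-y * np.dot(x, theta))))
print(exact, taylor_loss(theta, x, y))  # ~0.8401 vs ~0.8402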
class A:
    def __init__(self, x, y, b, pub_key=None):
        self.x = x  # A's vertical partition of X.
        self.y = y  # A's training labels.
        self.b = b  # reference to Host B.
        self.features = x.shape[1]
        self.pub_key = pub_key

    # Called by Coordinator with current model Theta for each mini-batch;
    # returns (encrypted) gradients for Host A, Host B. The preview cuts the
    # body off; this sketch follows that contract.
    def gradients(self, theta):
        # A's share of the shared Taylor residual: 0.25*x_a.theta_a - 0.5*y.
        u = 0.25 * np.dot(self.x, theta[:self.features]) - 0.5 * self.y
        if self.pub_key is not None:
            u = encrypt(self.pub_key, u)
        grad_b, residual = self.b.gradients(theta[self.features:], u)
        return np.dot(residual, self.x) / self.x.shape[0], grad_b
class B:
    def __init__(self, x, pub_key=None):
        self.x = x  # Host B's X.
        self.features = x.shape[1]
        self.pub_key = pub_key

    # Called by Host (A) with current model Theta and A's (encrypted) part
    # of the gradient calculation. The preview cuts the body off; a sketch
    # using only Paillier's additive operations:
    def gradients(self, theta, u):
        residual = u + 0.25 * np.dot(self.x, theta)  # ciphertext + plaintext works
        grad_b = np.dot(residual, self.x) / self.x.shape[0]
        return grad_b, residual
class C:
    def __init__(self, a, test_x, test_y, pri_key=None):
        self.a = a  # reference to Host A.
        self.test_x = test_x
        self.test_y = test_y
        self.features = test_x.shape[1]
        self.pri_key = pri_key

    def optimise(self, epochs, batch_size, eta, gamma):
        # Body truncated in the preview. In outline: for each epoch and
        # mini-batch, request (encrypted) gradients via Host A, decrypt them
        # here with pri_key, and step theta with learning rate eta.
        ...
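C holds a held-out test set, presumably to track accuracy between rounds. With labels in {-1, +1}, as the 0.5*y term in the Taylor loss suggests, prediction under this model is just a sign test on the logit (a hypothetical helper, not part of the gist):

def predict(theta, x):
    # Classify by the sign of the logit w'x.
    return np.where(np.dot(x, theta) >= 0, 1, -1)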
epochs = 10
times = []
# Benchmark training time as a function of Paillier key size;
# k = None is the unencrypted baseline.
for k in [None, 256, 512, 1024, 2048]:
    pub_key, pri_key = paillier.generate_paillier_keypair(n_length=k) \
        if k is not None else (None, None)
    b = B(B_x, pub_key)
    a = A(A_x, yy, b, pub_key)
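The preview cuts off inside the loop; the remainder presumably builds the coordinator and times a training run for each key size. A hedged completion of the loop body (C_test_x, C_test_y and the hyperparameter values are illustrative placeholders, not from the gist; requires import time at the top of the script):

    c = C(a, C_test_x, C_test_y, pri_key)
    t0 = time.time()
    c.optimise(epochs, batch_size=32, eta=0.1, gamma=0.0)  # illustrative values
    times.append(time.time() - t0)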