Skip to content

Instantly share code, notes, and snippets.

@johnhw
Created August 5, 2018 12:38
Show Gist options
  • Save johnhw/be6baa1adfceedd73b7e3e43ecb4e78d to your computer and use it in GitHub Desktop.
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# force backend to tensorflow\n",
"import os\n",
"os.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\n",
"\n",
"from keras.layers import Dense, Input\n",
"from keras.layers import Add\n",
"from keras.models import Model\n",
"from keras.regularizers import Regularizer\n",
"from keras import backend as K\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class SimpleRegularizer(Regularizer):\n",
"    \"\"\"Regularizer penalising the squared distance between the\n",
"    regularised kernel and a fixed reference weight tensor.\"\"\"\n",
"\n",
"    def __init__(self, input_weights):\n",
"        # input_weights: a backend Tensor, e.g. one entry of another\n",
"        # layer's trainable_weights\n",
"        self.input_tensor = input_weights\n",
"\n",
"    def __call__(self, W):\n",
"        # sum-of-squared-errors between W and the reference tensor\n",
"        difference = self.input_tensor - W\n",
"        return K.sum(K.square(difference))\n",
"\n",
"    def get_config(self):\n",
"        # nothing to serialise for this toy example\n",
"        return {}\n",
" \n",
"# this is a simple network:\n",
"# input -> dense -> add -> output\n",
"# -> dense_regularised |\n",
"#\n",
"# It's not important that it has this topology, this is just an example\n",
" \n",
"input_layer = Input(shape=(4,))\n",
"\n",
"# NOTE: we separate creating the layer and then applying it to a specific\n",
"# input, to get access to the layer variable directly, so we can get at its weights\n",
"dense = Dense(8) # dense layer, we will use the weights in the next layer\n",
"dense_output = dense(input_layer) # the layer, applied to some input\n",
"\n",
"# now apply a regularizer, which should converge to being equal in weights\n",
"# to the first dense layer\n",
"dense_regularised = Dense(8, kernel_regularizer=SimpleRegularizer(dense.trainable_weights[0]))\n",
"dense_regularised_output = dense_regularised(input_layer)\n",
"\n",
"# now create an output by summing the outputs of the two dense layers\n",
"output_layer = Add()([dense_output, dense_regularised_output])\n",
"\n",
"model = Model(input_layer, output_layer)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# random inputs and targets -- just something to drive the optimiser;\n",
"# the data itself is meaningless for this demo\n",
"n_samples = 1000\n",
"X = np.random.normal(loc=0, scale=1, size=(n_samples, 4))\n",
"Y = np.random.normal(loc=0, scale=1, size=(n_samples, 8))\n",
"model.compile(optimizer='adam', loss='mse')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# fit the model\n",
"model.fit(X,Y, epochs=100)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# if training has converged, the two layers' weight arrays should be close\n",
"for layer in (dense, dense_regularised):\n",
"    print(layer.get_weights())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sum-of-squared differences between the two kernel matrices -- should be small\n",
"kernel_a = dense.get_weights()[0]\n",
"kernel_b = dense_regularised.get_weights()[0]\n",
"print(np.sum((kernel_a - kernel_b) ** 2))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment