- Install gcc/g++ 7+
Add this line to
/etc/apt/sources.list
deb http://ftp.de.debian.org/debian buster main
Then install gcc-7 and g++-7:
sudo apt-get install gcc-7 g++-7
sudo rm /usr/bin/gcc
sudo rm /usr/bin/g++
sudo ln -s /usr/bin/gcc-7 /usr/bin/gcc
sudo ln -s /usr/bin/g++-7 /usr/bin/g++
# Vanilla version: plain mean-squared error over the whole heatmap.
squared_error = tf.math.square(labels - output)
loss += tf.math.reduce_mean(squared_error)

# Improved version: up-weight pixels where the label is positive so the
# sparse keypoint pixels are not drowned out by the background zeros.
# NOTE(review): weight is 82 on positive pixels vs 1 elsewhere — the 81
# factor is presumably tuned to the label sparsity; confirm upstream.
positive_mask = tf.cast(labels > 0, dtype=tf.float32)
weights = positive_mask * 81 + 1
loss += tf.math.reduce_mean(squared_error * weights)
# Build a square Gaussian patch of side length 6*sigma + 1 (7x7 when
# sigma == 1) whose peak value at the center is `scale` (here 1).
scale = 1
size = 6 * sigma + 1
coords = tf.range(0, size, 1)
x, y = tf.meshgrid(coords, coords, indexing='xy')

# The patch side length is odd, so the peak sits at the middle index.
center_x = size // 2
center_y = size // 2

# exp(-d^2 / (2*sigma^2)) evaluated on the grid; cast to float32 for use
# as a heatmap target.
squared_dist = tf.math.square(x - center_x) + tf.math.square(y - center_y)
gaussian_patch = tf.cast(
    tf.math.exp(-squared_dist / (2 * tf.math.square(sigma))) * scale,
    dtype=tf.float32,
)
# Keypoints with coordinate values <= 0 are invisible/unlabelled; drop
# them so they cannot drag the bounding extremes to the origin.
visible_x = tf.boolean_mask(keypoint_x, keypoint_x > 0)
visible_y = tf.boolean_mask(keypoint_y, keypoint_y > 0)

# Extremes of the visible keypoints: left-most, right-most, top, bottom.
keypoint_xmin = tf.reduce_min(visible_x)
keypoint_xmax = tf.reduce_max(visible_x)
keypoint_ymin = tf.reduce_min(visible_y)
keypoint_ymax = tf.reduce_max(visible_y)
{ | |
"joints_vis": [ | |
1, | |
1, | |
1, | |
1, | |
1, | |
1, | |
1, | |
1, |
def HourglassModule(inputs, order, filters, num_residual):
    """
    One Hourglass Module. Usually we stack multiple of them together.
    Reference implementation:
    https://github.com/princeton-vl/pose-hg-train/blob/master/src/models/hg.lua#L3

    inputs: Input tensor to this module.
        (NOTE(review): body not visible here — presumably a 4-D feature
        map; confirm against the callers.)
    order: The remaining order for HG modules to call itself recursively.
    filters: NOTE(review): presumably the number of conv filters used
        inside this module — confirm against the (unseen) body.
    num_residual: Number of residual layers for this HG module.
    """
    # Upper branch
/etc/apt/sources.list
deb http://ftp.de.debian.org/debian buster main
Then install gcc-7 and g++-7:
sudo apt-get install gcc-7 g++-7
sudo rm /usr/bin/gcc
sudo rm /usr/bin/g++
sudo ln -s /usr/bin/gcc-7 /usr/bin/gcc
sudo ln -s /usr/bin/g++-7 /usr/bin/g++
sudo git clone https://github.com/vim/vim.git && cd vim
sudo ./configure --with-features=huge --enable-multibyte --enable-pythoninterp=yes --with-python-config-dir=/usr/lib/python2.7/config-x86_64-linux-gnu/ --enable-python3interp=yes --with-python3-config-dir=/usr/lib/python3.5/config-3.5m-x86_64-linux-gnu/ --enable-gui=gtk2 --enable-cscope --prefix=/usr/local/
"vundle
set nocompatible
filetype off
def train_step(images_a, images_b, epoch, step):
    # Update the generators first; their outputs feed the discriminator
    # step below.
    gen_a2b, gen_b2a, gen_loss_dict = train_generator(images_a, images_b)

    # Draw history-buffered fakes from the image pools so the
    # discriminators also see older generator outputs, not only the
    # freshest batch.
    pooled_b2a = fake_pool_b2a.query(gen_b2a)
    pooled_a2b = fake_pool_a2b.query(gen_a2b)

    # Then update both discriminators against reals and pooled fakes.
    dis_loss_dict = train_discriminator(images_a, images_b, pooled_a2b, pooled_b2a)
def train(dataset, epochs): | |
for epoch in range(checkpoint.epoch+1, epochs+1): |
@tf.function
def train_discriminator(images_a, images_b, fake_a2b, fake_b2a):
    """One discriminator update step.

    images_a, images_b: real samples from domains A and B.
    fake_a2b, fake_b2a: generated samples (presumably drawn from the
        image pools by the caller — confirm against train_step).
    """
    real_a = images_a
    real_b = images_b
    with tf.GradientTape() as tape:
        # Discriminator A should classify real_a as A
        loss_gan_dis_a_real = calc_gan_loss(discriminator_a(real_a, training=True), True)
        # Discriminator A should classify generated fake_b2a as not A
        loss_gan_dis_a_fake = calc_gan_loss(discriminator_a(fake_b2a, training=True), False)
def make_discriminator_model():
    """Build a sequential conv discriminator for 256x256 RGB images.

    Layer plan (filter counts): C64-C128-C256-C512.
    NOTE(review): only the first two conv blocks are visible here; the
    C256/C512 blocks presumably follow the same pattern — confirm below.
    """
    # C64-C128-C256-C512
    model = tf.keras.Sequential()
    # First block: 4x4 stride-2 conv, no BatchNormalization, bias kept.
    model.add(tf.keras.layers.Conv2D(64, (4, 4), strides=(2, 2), padding='same', input_shape=(256, 256, 3)))
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
    # Subsequent blocks drop the conv bias (use_bias=False) because the
    # following BatchNormalization layer supplies its own offset.
    model.add(tf.keras.layers.Conv2D(128, (4, 4), strides=(2, 2), padding='same', use_bias=False))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))