Skip to content

Instantly share code, notes, and snippets.

View pranjalAI's full-sized avatar

Pranjal Saxena pranjalAI

View GitHub Profile
class ResidualSep(nn.Module):
    """Residual block built around a dilated depthwise 3x3 convolution.

    NOTE(review): the pasted source is truncated here — the original
    ``nn.Sequential`` very likely continues (e.g. a pointwise 1x1 conv)
    and the class's ``forward`` is not visible. Only the visible layers
    are kept and the Sequential is closed so the fragment parses.
    TODO: restore the missing tail from the original file.
    """

    def __init__(self, channels, dilation=1):
        super().__init__()
        # ReLU -> reflection pad -> depthwise 3x3 conv (groups=channels).
        # ReflectionPad2d(dilation) exactly offsets the dilated 3x3
        # kernel's receptive field, so spatial size is preserved.
        self.blocks = nn.Sequential(
            nn.ReLU(),
            nn.ReflectionPad2d(dilation),
            nn.Conv2d(channels, channels, kernel_size=3, stride=1,
                      padding=0, dilation=dilation,
                      groups=channels, bias=False),
        )
# Convert the trained Keras `generator` model (defined elsewhere in the
# file) to TensorFlow Lite format for on-device inference.
converter = tf.lite.TFLiteConverter.from_keras_model(generator)
# `tflite_model` holds the serialized flatbuffer bytes; presumably they
# are written to a .tflite file later — not visible in this snippet.
tflite_model = converter.convert()
@tf.function  # compile to a TF graph so repeated training calls are fast
def train_step(images):
    """Run the forward half of one GAN training step on a batch.

    NOTE(review): the pasted source is truncated — the original almost
    certainly continues inside the tapes with the loss computation and
    then applies gradients with the optimizers. Only the visible
    forward passes are kept. TODO: restore the missing tail.

    `BATCH_SIZE`, `SEED_SIZE`, `generator` and `discriminator` are
    module-level names defined elsewhere in the original file.
    """
    # Fresh latent noise for the generator each step.
    seed = tf.random.normal([BATCH_SIZE, SEED_SIZE])
    # Two tapes: generator and discriminator gradients are taken
    # independently over the same forward computation.
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(seed, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)
# Shared binary cross-entropy for both GAN losses. from_logits=True
# means the discriminator is presumed to output raw scores (no final
# sigmoid) — its head is not visible in this snippet; confirm.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
    """Discriminator loss: real images should score 1, fakes should score 0.

    Uses the module-level `cross_entropy` (BinaryCrossentropy,
    from_logits=True) defined elsewhere in the file.

    Args:
        real_output: discriminator scores for real images.
        fake_output: discriminator scores for generated images.

    Returns:
        Sum of the real-vs-ones and fake-vs-zeros BCE terms.
    """
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss
def generator_loss(fake_output):
    """Generator loss: reward fakes that the discriminator scored as real (1).

    Uses the module-level `cross_entropy` defined elsewhere in the file.
    """
    return cross_entropy(tf.ones_like(fake_output), fake_output)
def build_discriminator(image_shape):
    """Start of a DCGAN discriminator: strided convs downsample the image.

    NOTE(review): truncated in the paste — the original continues with
    further conv blocks and a final classification head, then returns
    `model`. As written this returns None. TODO: restore the tail.
    """
    model = Sequential()
    # Strided 3x3 conv halves spatial resolution; LeakyReLU + Dropout
    # follow the usual DCGAN discriminator recipe.
    model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=image_shape,
                     padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    # Asymmetric zero padding (bottom/right only) adjusts odd-sized
    # feature maps before the next strided conv.
    model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
def build_generator(seed_size, channels):
    """Start of a DCGAN generator: dense seed projection, then upsampling.

    NOTE(review): truncated in the paste — the original continues with
    more UpSampling2D/Conv2D blocks and a final `channels`-deep output
    layer, then returns `model`. TODO: restore the missing tail.
    """
    model = Sequential()
    # Project the latent vector into a 4x4x256 tensor to grow from.
    model.add(Dense(4 * 4 * 256, activation="relu", input_dim=seed_size))
    model.add(Reshape((4, 4, 256)))
    model.add(UpSampling2D())  # 4x4 -> 8x8
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation("relu"))
# Export the trained PyTorch `model` (defined elsewhere) to ONNX.
model.eval()  # freeze dropout/batchnorm behavior for a deterministic trace
dummy_input = torch.randn(1, 3, INPUT_HEIGHT, INPUT_WIDTH,
                          dtype=torch.float32, device=DEVICE)
# Sanity-check that the model runs on the dummy input before exporting.
output = model(dummy_input.detach())
input_names = ['data']
output_names = ['output1']
# NOTE(review): the export call was truncated in the paste; the output
# path argument below is a placeholder — confirm against the original.
torch.onnx.export(model, dummy_input,
                  'model.onnx',
                  input_names=input_names,
                  output_names=output_names)
class ResidualSep(nn.Module):
    """Residual block built around a dilated depthwise 3x3 convolution.

    NOTE(review): duplicate of the fragment earlier in this scrape, and
    truncated the same way — the original ``nn.Sequential`` very likely
    continues and ``forward`` is not visible. Only the visible layers
    are kept and the Sequential is closed so the fragment parses.
    TODO: restore the missing tail from the original file.
    """

    def __init__(self, channels, dilation=1):
        super().__init__()
        # ReLU -> reflection pad -> depthwise 3x3 conv (groups=channels).
        # ReflectionPad2d(dilation) exactly offsets the dilated 3x3
        # kernel's receptive field, so spatial size is preserved.
        self.blocks = nn.Sequential(
            nn.ReLU(),
            nn.ReflectionPad2d(dilation),
            nn.Conv2d(channels, channels, kernel_size=3, stride=1,
                      padding=0, dilation=dilation,
                      groups=channels, bias=False),
        )
# Fetch the COCO 2017 training set: image archive plus instance
# annotations. `download_file` and the *_ARCHIVE constants are defined
# elsewhere in the original file.
download_file('http://images.cocodataset.org/zips/train2017.zip',
              COCO_IMAGES_ARCHIVE)
download_file('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
              COCO_ANNOTATIONS_ARCHIVE)
# Only the annotation archive is extracted here; the image archive is
# not unpacked in this snippet.
with ZipFile(COCO_ANNOTATIONS_ARCHIVE, 'r') as archive:
    archive.extractall()
# Target COCO category for building the dataset.
COCO_CATEGORY = 'person' # target category
# All downloads and extractions happen relative to the current directory.
WORK_DIR = Path('.')
# Archives to be downloaded
COCO_IMAGES_ARCHIVE = WORK_DIR / 'train2017.zip'
COCO_ANNOTATIONS_ARCHIVE = WORK_DIR / 'annotations_trainval2017.zip'
# Paths where the dataset will be extracted to
COCO_ANNOTATIONS_PATH = WORK_DIR / 'annotations/instances_train2017.json'