# Modified evaluation script of PointNet for DGCNN
# (unofficial)
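#
# Usage sketch (assumes this file sits inside the DGCNN/PointNet repo next to
# provider.py, models/ and utils/, with the ModelNet40 HDF5 files under
# data/modelnet40_ply_hdf5_2048/; the file name evaluate.py is an assumption):
#   python evaluate.py --model dgcnn --model_path log/model.ckpt --num_point 1024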
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import tf_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='dgcnn', help='Model name: dgcnn')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=24, help='Batch size [default: 24]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
parser.add_argument('--dump_dir', default='dump', help='Dump folder path [default: dump]')
parser.add_argument('--model_path', default='log/model.ckpt', help='Model checkpoint file path [default: log/model.ckpt]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
DUMP_DIR = FLAGS.dump_dir
MODEL_PATH = FLAGS.model_path

MODEL = importlib.import_module(FLAGS.model)  # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR))  # bkp of model def
os.system('cp train.py %s' % (LOG_DIR))  # bkp of train procedure

# Write the evaluation log to DUMP_DIR so an existing training log in LOG_DIR is not overwritten
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
MAX_NUM_POINT = 2048
NUM_CLASSES = 40

BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

# ModelNet40 official train/test split
TRAIN_FILES = provider.getDataFiles( \
    os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles( \
    os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
SHAPE_NAMES = [line.rstrip() for line in \
    open(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))]
def log_string(out_str):
    LOG_FOUT.write(out_str+'\n')
    LOG_FOUT.flush()
    print(out_str)
def get_learning_rate(batch):
    learning_rate = tf.train.exponential_decay(
                        BASE_LEARNING_RATE,  # Base learning rate.
                        batch * BATCH_SIZE,  # Current index into the dataset.
                        DECAY_STEP,          # Decay step.
                        DECAY_RATE,          # Decay rate.
                        staircase=True)
    learning_rate = tf.maximum(learning_rate, 0.00001)  # CLIP THE LEARNING RATE!
    return learning_rate
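
# With staircase=True the schedule above evaluates to
#   lr = max(BASE_LEARNING_RATE * DECAY_RATE ** floor(batch * BATCH_SIZE / DECAY_STEP), 1e-5).
# It is only needed here because the graph still builds a training op before restoring.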
def get_bn_decay(batch):
    bn_momentum = tf.train.exponential_decay(
                      BN_INIT_DECAY,
                      batch*BATCH_SIZE,
                      BN_DECAY_DECAY_STEP,
                      BN_DECAY_DECAY_RATE,
                      staircase=True)
    bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
    return bn_decay
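
# Mirrors the training-time schedule:
#   bn_decay = min(0.99, 1 - 0.5 * 0.5 ** floor(batch * BATCH_SIZE / BN_DECAY_DECAY_STEP)),
# so batch-norm momentum starts at 0.5 and approaches the 0.99 clip as more batches are seen.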
def evaluate(num_votes):
    is_training = False

    with tf.device('/gpu:'+str(GPU_INDEX)):
        pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
        is_training_pl = tf.placeholder(tf.bool, shape=())
        print(is_training_pl)

        # Note the global_step=batch parameter to minimize.
        # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
        batch = tf.Variable(0)
        bn_decay = get_bn_decay(batch)
        tf.summary.scalar('bn_decay', bn_decay)

        # Get model and loss
        pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
        loss = MODEL.get_loss(pred, labels_pl, end_points)
        tf.summary.scalar('loss', loss)

        correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
        accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
        tf.summary.scalar('accuracy', accuracy)

        # Get training operator
        learning_rate = get_learning_rate(batch)
        tf.summary.scalar('learning_rate', learning_rate)
        if OPTIMIZER == 'momentum':
            optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
        elif OPTIMIZER == 'adam':
            optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss, global_step=batch)

        # Add ops to save and restore all the variables.
        saver = tf.train.Saver()

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    sess = tf.Session(config=config)

    # Restore variables from disk.
    saver.restore(sess, MODEL_PATH)
    log_string("Model restored.")

    # Add summary writers
    #merged = tf.merge_all_summaries()
    merged = tf.summary.merge_all()

    ops = {'pointclouds_pl': pointclouds_pl,
           'labels_pl': labels_pl,
           'is_training_pl': is_training_pl,
           'pred': pred,
           'loss': loss,
           'train_op': train_op,
           'merged': merged,
           'step': batch}

    eval_one_epoch(sess, ops, num_votes)
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    for fn in range(len(TEST_FILES)):
        log_string('----'+str(fn)+'----')
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        print(current_data.shape)

        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)

        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx

            # Aggregating BEG
            batch_loss_sum = 0  # sum of losses for the batch
            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES))  # score for classes
            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES))  # 0/1 for classes
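            # Each vote rotates the batch about the up axis by vote_idx/num_votes * 2*pi,
            # runs a forward pass, and accumulates the raw class scores; the final prediction
            # is the argmax of the summed scores (batch_pred_classes keeps a majority-vote
            # tally as an alternative, but it is not used below).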
            for vote_idx in range(num_votes):
                orig_data = current_data[start_idx:end_idx, :, :]
                rotated_data = provider.rotate_point_cloud_by_angle(orig_data,
                                                                    vote_idx/float(num_votes) * np.pi * 2)
                feed_dict = {ops['pointclouds_pl']: rotated_data,  # orig_data or rotated_data
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training}
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                              feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END

            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum
            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))

    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
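
# With num_votes=1 (as called below) each test shape is evaluated once in its original
# orientation; larger values sum the logits over several rotations about the up axis.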
if __name__=='__main__':
    with tf.Graph().as_default():
        evaluate(num_votes=1)
    LOG_FOUT.close()