# Gist by @stevenhao, created August 1, 2018:
# https://gist.github.com/stevenhao/a27b4367b6586099a88a8c93f283c9b2
import os
import pyquaternion
import numpy
import boto3
import json
from formatter import *
# Directory containing the extracted AutoX pilot data (.xyz point-cloud files).
DIR = 'autox_data_150_frames/autox_pilot_data_first_150_frames'
def parse_vector_4(line):
    """Parse a line of four whitespace-separated numbers into a Vector4.

    Example: "0.2 0.12 -5 0.6" -> Vector4(0.2, 0.12, -5.0, 0.6)

    Uses split() with no argument so repeated spaces, tabs, and the
    trailing newline are tolerated; the original split(' ') produced
    empty tokens on double spaces and crashed in float('').
    """
    return Vector4(*map(float, line.split()))
# Pre-signed image URLs, keyed by frame number as a string.
# Generated beforehand by `python reuploadImages.py`.
with open('images.out') as url_file:
    image_urls = json.load(url_file)
def matrix_to_quaternion(mat):
    """Convert a 3x3 rotation matrix into the project's Quaternion type."""
    rotation = pyquaternion.Quaternion(matrix=mat)
    w = rotation.real
    x, y, z = rotation.imaginary
    return Quaternion(w, x, y, z)
def get_image(frame_number):
    """Build a CameraImage for *frame_number* from the fixed AutoX calibration.

    The signed image URL is looked up in the module-level ``image_urls``
    map; extrinsics (R, t), intrinsics (K) and distortion coefficients are
    hard-coded from the sensor's calibration files.

    Changes from the original: leftover debug ``print`` statements removed,
    and the deprecated ``numpy.mat``/``.I`` matrix type replaced with plain
    arrays and ``numpy.linalg.inv`` — the computed position/heading values
    are unchanged.
    """
    image_url = image_urls[str(frame_number)]
    scale_factor = 1

    # The "t" vector from extrinsics.xml:
    # the position of the world origin, in camera coordinates.
    t = numpy.array([2.9597952961921692e-02, -1.0658357292413712e-01, 8.6099378764629364e-02])
    # The "R" matrix from extrinsics.xml:
    # the rotation taking world -> camera coordinates.
    r = numpy.array([
        [6.4663961529731750e-02, -9.9790662527084351e-01, -9.5041986787691712e-04],
        [2.1441327407956123e-02, 2.3415817413479090e-03, -9.9976736307144165e-01],
        [9.9767673015594482e-01, 6.4628541469573975e-02, 2.1547857671976089e-02],
    ])

    # Camera pose in world coordinates: invert the world->camera transform.
    # camera_position = R^-1 * (-t); camera_heading = R^-1.
    r_inv = numpy.linalg.inv(r)
    cam_pos = numpy.matmul(r_inv, -t)
    position = Vector3(cam_pos[0], cam_pos[1], cam_pos[2])
    heading = matrix_to_quaternion(r_inv)

    # The "K" matrix from intrinsics.
    # http://ksimek.github.io/2013/08/13/intrinsic/
    [[fx, skew, cx], [_, fy, cy], [_, _, _]] = [
        [1.5262145996093750e+03, 0., 6.5264837646484375e+02],
        [0., 1.5262145996093750e+03, 3.3839697265625000e+02],
        [0., 0., 1.],
    ]
    # Distortion coefficients from extrinsics (OpenCV ordering k1,k2,p1,p2,k3).
    [k1, k2, p1, p2, k3] = [
        -5.2631044387817383e-01, 4.5821169018745422e-01,
        1.4467540895566344e-03, -2.1737588103860617e-03,
        -3.5449171066284180e-01,
    ]
    return CameraImage(image_url, scale_factor, position, heading,
                       fx, fy, cx, cy, skew, k1, k2, k3, p1, p2)
def load_frame(frame_number):
    """Load the point cloud and camera image for one frame from DIR.

    Returns a Frame with the device at the origin.
    Raises Exception('Frame not found', frame_number) when no matching
    .xyz file exists.
    """
    files = os.listdir(DIR)
    # Bug fix: next() without a default raises StopIteration on no match,
    # so the original `if points_path == None` check was unreachable.
    # Supplying None as the default makes the guard actually fire.
    points_path = next(
        (f for f in files
         if f.startswith(str(frame_number)) and f.endswith('.xyz')),
        None,
    )
    if points_path is None:
        raise Exception('Frame not found', frame_number)
    with open(os.path.join(DIR, points_path), 'r') as fin:
        # One "x y z intensity" vector per line.
        points = [parse_vector_4(line) for line in fin]
    images = [get_image(frame_number)]
    device_position = Vector3(0, 0, 0)
    return Frame(device_position=device_position, points=points, images=images)
if __name__ == '__main__':
    # Even-numbered frames 74..372 inclusive.
    frame_numbers = range(74, 373, 2)
    # frame_numbers = [372]
    # Hoisted out of the loop: one S3 resource handle serves every upload.
    s3 = boto3.resource('s3')
    for frame_num in frame_numbers:
        # %-formatting keeps the "on 74" output identical on Python 2 and 3.
        print('on %d' % frame_num)
        frame = load_frame(frame_num)
        data = json.dumps(frame.to_obj())
        s3.Bucket('scaleapi-attachments').put_object(
            Key='autox-lidar/frame%d.json' % frame_num,
            Body=data,
        )