# A Collection of NLP Notes
## N-grams
### Calculating unigram probabilities
P(w_i) = count(w_i) / count(total number of words)
In English:
# A Collection of NLP Notes
## N-grams
### Calculating unigram probabilities
P(w_i) = count(w_i) / count(total number of words)
In English:
| # Author: Kyle Kastner# License: BSD 3-Clause# For a reference on parallel processing in Python see tutorial by David Beazley# http://www.slideshare.net/dabeaz/an-introduction-to-python-concurrency# Loosely based on IBM example# http://www.ibm.com/developerworks/aix/library/au-threadingpython/# If you want to download all the PASCAL VOC data, use the following in bash..."""#! /bin/bash# 2008wget http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar# 2009wget http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar# 2010wget http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar# 2011wget http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar# 2012wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar# Latest devkitwget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar"""try: import Queueexcept ImportError: import queue as Queueimport threadingimport ti |
import cv2
import string, random

# Open the default camera (device index 0).
vc = cv2.VideoCapture(0)

# Try to grab the first frame; rval stays False when the camera
# could not be opened, so downstream code can bail out cleanly.
rval = vc.isOpened()
if rval:
    rval, frame = vc.read()
| # MIT License | |
| # | |
| # Copyright (c) 2016 David Sandberg | |
| # | |
| # Permission is hereby granted, free of charge, to any person obtaining a copy | |
| # of this software and associated documentation files (the "Software"), to deal | |
| # in the Software without restriction, including without limitation the rights | |
| # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
| # copies of the Software, and to permit persons to whom the Software is | |
| # furnished to do so, subject to the following conditions: |
| """Keras implementation of SSD.""" | |
| import keras.backend as K | |
| from keras.layers import Activation | |
| from keras.layers import AtrousConv2D | |
| from keras.layers import Conv2D | |
| from keras.layers import Dense | |
| from keras.layers import Flatten | |
| from keras.layers import GlobalAveragePooling2D | |
| from keras.layers import Input |
| { | 
| protos : | |
| { | |
| cnn : | |
| { | |
| gradInput : FloatTensor - empty | |
| modules : | |
| { | |
| 1 : | |
| { |
| ##### | |
| # modified from https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/06_CIFAR-10.ipynb | 
| ##### | |
| import matplotlib.pyplot as plt | |
| import tensorflow as tf | |
| import numpy as np | |
| from sklearn.metrics import confusion_matrix | |
| import time | |
| from datetime import timedelta | |
| import math |
| My problems with the paper: | |
| - There is no comparison of resulting video quality. The amount of encode time (and power | |
| expended) to produce a H.264 bit stream *dramatically* depends on the desired quality level; | |
| e.g. for x264 (state of the art SW encoder, already in 2010 when the paper was written), the | |
| difference between the fastest and best quality settings is close to 2 orders of magnitude | |
| in both speed and power use. This is not negligible! | |
| [NOTE: This is excluding quality-presets like "placebo", which are more demanding still. | |
| Even just comparing between different settings usable for real-time encoding, we still have | |
| at least an order of magnitude difference.] | |
| - They have their encoder, which is apparently based on JM 8.6 (*not* a good encoder!), for |
As configured in my dotfiles.
start new:
tmux
start new with session name: