https://wandb.ai/rom1504/dalle2_train_decoder/runs/mic5buox/files/decoder_config.json
get dalle2
get the config file
get these 2 .sh
run sbatch start_big.sh
https://wandb.ai/rom1504/dalle2_train_decoder/runs/mic5buox/files/decoder_config.json
get dalle2
get the config file
get these 2 .sh
run sbatch start_big.sh
| #!/bin/bash | |
| # | |
| # script to extract ImageNet dataset | |
| # ILSVRC2012_img_train.tar (about 138 GB) | |
| # ILSVRC2012_img_val.tar (about 6.3 GB) | |
| # make sure ILSVRC2012_img_train.tar & ILSVRC2012_img_val.tar in your current directory | |
| # | |
| # https://github.com/facebook/fb.resnet.torch/blob/master/INSTALL.md | |
| # | |
| # train/ |
| #!/bin/bash | |
| # | |
| # script to extract ImageNet dataset | |
| # ILSVRC2012_img_train.tar (about 138 GB) | |
| # ILSVRC2012_img_val.tar (about 6.3 GB) | |
| # make sure ILSVRC2012_img_train.tar & ILSVRC2012_img_val.tar in your current directory | |
| # | |
| # https://github.com/facebook/fb.resnet.torch/blob/master/INSTALL.md | |
| # | |
| # train/ |
There are a lot of topic models. As of 18/02/23, the number of topic models is 24.
| import numpy as np | |
def symdirichlet(alpha, n):
    """Draw one sample from a symmetric Dirichlet distribution.

    Parameters
    ----------
    alpha : float
        Concentration parameter shared by all n components.
    n : int
        Dimension of the sample (number of categories).

    Returns
    -------
    numpy.ndarray
        A length-n probability vector (non-negative, sums to 1).
    """
    # np.full is the idiomatic replacement for np.zeros(n) + alpha:
    # it builds the constant parameter vector in one allocation.
    return np.random.dirichlet(np.full(n, alpha))
| def exp_digamma(x): | |
| if x < 0.1: | |
| return x/100 |
| """ | |
| Code to parse sklearn classification_report | |
| """ | |
| ## | |
| import sys | |
| import collections | |
| ## | |
| def parse_classification_report(clfreport): | |
| """ | |
| Parse a sklearn classification report into a dict keyed by class name |
| """ | |
| preprocess-twitter.py | |
| python preprocess-twitter.py "Some random text with #hashtags, @mentions and http://t.co/kdjfkdjf (links). :)" | |
| Script for preprocessing tweets by Romain Paulus | |
| with small modifications by Jeffrey Pennington | |
| with translation to Python by Motoki Wu | |
| Translation of Ruby script to create features for GloVe vectors for Twitter data. |
| from keras.models import Graph | |
| from keras.layers import containers | |
| from keras.layers.core import Dense, Dropout, Activation, Reshape, Flatten | |
| from keras.layers.embeddings import Embedding | |
| from keras.layers.convolutional import Convolution2D, MaxPooling2D | |
| def ngram_cnn(n_vocab, max_length, embedding_size, ngram_filters=[2, 3, 4, 5], n_feature_maps=100, dropout=0.5, n_hidden=15): | |
| """A single-layer convolutional network using different n-gram filters. | |
| Parameters |
| """Information Retrieval metrics | |
| Useful Resources: | |
| http://www.cs.utexas.edu/~mooney/ir-course/slides/Evaluation.ppt | |
| http://www.nii.ac.jp/TechReports/05-014E.pdf | |
| http://www.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf | |
| http://hal.archives-ouvertes.fr/docs/00/72/67/60/PDF/07-busa-fekete.pdf | |
| Learning to Rank for Information Retrieval (Tie-Yan Liu) | |
| """ | |
| import numpy as np |