| Basic | Specification List |
|---|---|
| CPU | Octa-core (8x2.2 GHz Cortex-A53) |
| Chipset | Qualcomm MSM8953 Snapdragon 625 |
| GPU | Adreno 506 |
| Memory | 4 GB |
| Shipped Android Version | 6.0.1 |
#! /usr/bin/python
import dota2api
from dota2api.src.exceptions import APIError, APITimeoutError
import csv
from multiprocessing import Pool
import time
import sys

def getMatchInfo( api, matchId ):
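    # The gist preview is truncated here; the body below is only a minimal sketch of
    # how the lookup could proceed. The back-off, the single retry, and the fields
    # kept for the CSV row are assumptions for illustration, not the author's code.
    try:
        match = api.get_match_details( match_id = matchId )
    except APITimeoutError:
        time.sleep( 1 )                                   # brief back-off, then one retry
        match = api.get_match_details( match_id = matchId )
    except APIError as e:
        sys.stderr.write( 'API error for match %s: %s\n' % ( matchId, e ) )
        return None
    # an illustrative subset of fields, suitable for writing out with csv.writer
    return [ matchId, match[ 'duration' ], match[ 'radiant_win' ] ]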
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common import exceptions
import time
from retry import retry
from chatterbot import ChatBot
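The snippet is cut off after the imports. A minimal sketch of how they might fit together follows: wait for the newest message element to appear, hand its text to a ChatterBot instance, and retry if the element goes stale mid-read. The bot name, CSS selector, and timeout are assumptions made for illustration, not the original author's code.

bot = ChatBot( 'DemoBot' )
driver = webdriver.Chrome()

@retry( exceptions.StaleElementReferenceException, tries = 3, delay = 1 )
def reply_to_latest_message():
    # wait up to 10 s for the newest message element (selector is an assumption)
    message = WebDriverWait( driver, 10 ).until(
        EC.presence_of_element_located( ( By.CSS_SELECTOR, '.message:last-child' ) )
    )
    # let ChatterBot generate a reply from the scraped text
    response = bot.get_response( message.text )
    print( response )
    time.sleep( 2 )   # small pause before the next poll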
# Source code accompanying the blog post at http://monik.in/a-noobs-guide-to-implementing-rnn-lstm-using-tensorflow/
import numpy as np
import random
from random import shuffle
import tensorflow as tf
# from tensorflow.models.rnn import rnn_cell
# from tensorflow.models.rnn import rnn
NUM_EXAMPLES = 10000
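The gist stops after the constant above. A minimal sketch of the data generation described in the linked post follows: the task is to count the ones in a 20-bit binary string, so each input is a sequence of 20 single-bit steps and each target is a one-hot vector over the 21 possible counts. The variable names and split are assumptions for illustration.

SEQ_LEN = 20
all_strings = [ '{0:020b}'.format( i ) for i in range( 2 ** SEQ_LEN ) ]   # every 20-bit string
shuffle( all_strings )
all_strings = all_strings[ :NUM_EXAMPLES * 2 ]                            # enough for train + test

# inputs: sequences of 20 single-bit steps; targets: one-hot count of ones (0..20)
inputs  = [ [ [ int( bit ) ] for bit in s ] for s in all_strings ]
targets = []
for s in all_strings:
    one_hot = [ 0 ] * ( SEQ_LEN + 1 )
    one_hot[ s.count( '1' ) ] = 1
    targets.append( one_hot )

train_input,  test_input  = inputs[ :NUM_EXAMPLES ],  inputs[ NUM_EXAMPLES: ]
train_output, test_output = targets[ :NUM_EXAMPLES ], targets[ NUM_EXAMPLES: ]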
#! /usr/bin/python3
'''
deck = List[Set[Card]]
commands:
(d)iscard  - discard the current infection rate's worth of cards
(e)pidemic - input the epidemic card drawn from the bottom of the deck
(r)emove   - remove a card from the discard pile (certain cards allow this)
(u)ndo     - revert the last command, e.g. after a typo
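'''

# The gist preview ends inside the docstring above. The loop below is only a
# minimal sketch of how the listed commands could be dispatched; the data
# structures and prompts are assumptions for illustration, not the original
# implementation.
def main():
    discard = set()        # cards currently in the discard pile
    history = []           # snapshots of the discard pile, for (u)ndo

    while True:
        cmd = input( 'command> ' ).strip().lower()[ :1 ]
        if cmd == 'd':
            history.append( set( discard ) )
            discard.add( input( 'card discarded: ' ) )
        elif cmd == 'e':
            history.append( set( discard ) )
            print( 'epidemic card noted:', input( 'card from the bottom: ' ) )
        elif cmd == 'r':
            history.append( set( discard ) )
            discard.discard( input( 'card to remove: ' ) )
        elif cmd == 'u' and history:
            discard = history.pop()
        else:
            print( 'unknown command' )

if __name__ == '__main__':
    main()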
[MASTER]

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code.
extension-pkg-whitelist=

# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
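This is the opening [MASTER] section of a .pylintrc. As a quick usage sketch, pylint can be pointed at it from a small driver script; the module name below is a placeholder, and the config is assumed to be saved as .pylintrc in the working directory.

# Assumes the config above is saved as .pylintrc next to the code being checked;
# 'my_package' is a placeholder for whatever is being linted.
from pylint import lint

lint.Run( [ '--rcfile=.pylintrc', 'my_package' ] )   # note: Run() exits the interpreter when finished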
import tensorflow as tf

# this function is used for quantizing activations
def quantize( zr, k ):  # zr => number to quantize, k => number of bits to use
    scaling = tf.cast( tf.pow( 2.0, k ) - 1, tf.float32 )
    return tf.round( scaling * zr ) / scaling  # round to the nearest representable quantized value

# this function applies quantization to activations
def shaped_relu( x, k = 1.0 ):  # x => number to be quantized, k => number of bits to use
    act = tf.clip_by_value( x, 0, 1 )  # clip the activation between 0 and 1 to stop overflow issues
    quant = quantize( act, k )  # quantize the value
    return act + tf.stop_gradient( quant - act )  # straight-through estimator: forward pass uses quant, gradient flows through act
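A short usage sketch (TF 1.x graph-mode style, matching the code above): shaped_relu can stand in for an ordinary activation, and because of the stop-gradient trick the forward pass sees the quantized value while the backward pass differentiates through the clipped, un-quantized activation, so the rounding does not zero out gradients. The layer sizes and bit width below are assumptions for illustration.

x = tf.placeholder( tf.float32, [ None, 784 ] )                    # e.g. flattened 28x28 images
w = tf.Variable( tf.random_normal( [ 784, 256 ], stddev = 0.05 ) )
b = tf.Variable( tf.zeros( [ 256 ] ) )

h = shaped_relu( tf.matmul( x, w ) + b, k = 2.0 )                  # activations quantized to 2 bits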