Official documentation
PREFIX determines all of the default paths:
TMP_DIR = <prefix>/tmp
STAMP_DIR = <prefix>/src/<name>-stamp
DOWNLOAD_DIR = <prefix>/src
SOURCE_DIR = <prefix>/src/<name>
BINARY_DIR = <prefix>/src/<name>-build
INSTALL_DIR = <prefix>
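A minimal sketch of how PREFIX is used in practice (the project name and URL below are placeholders): once PREFIX is set in ExternalProject_Add, CMake derives every directory above from it.

include(ExternalProject)
# Hypothetical project: only PREFIX is given, so TMP_DIR, STAMP_DIR,
# DOWNLOAD_DIR, SOURCE_DIR, BINARY_DIR and INSTALL_DIR all default
# to the layout listed above.
ExternalProject_Add(foo
  PREFIX ${CMAKE_BINARY_DIR}/third_party/foo
  URL    https://example.com/foo-1.0.tar.gz
)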
'''Test of stateful LSTM.
This trains an LSTM to convert a frequency-modulated signal to a sine wave.
The period of the signal is greater than the temporal dimension of the LSTM,
so in theory the stateful version should have an advantage.
'''
from __future__ import print_function
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
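The excerpt stops after selecting the backend; a minimal sketch of the stateful model it is leading up to (layer sizes and shapes here are assumptions, not the original script's values):

import keras
from keras.layers import LSTM, Dense

batch_size, timesteps, features = 32, 10, 1  # assumed shapes
model = keras.models.Sequential([
    # stateful=True carries hidden state across batches, letting the
    # network track a signal period longer than the window it sees
    LSTM(32, stateful=True, batch_input_shape=(batch_size, timesteps, features)),
    Dense(1),
])
model.compile(optimizer='adam', loss='mse')
model.reset_states()  # state must be cleared manually between independent sequences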
get_latest_release() {
  curl --silent "https://api.github.com/repos/$1/releases/latest" | # Get latest release from GitHub api
    grep '"tag_name":' |                                            # Get tag line
    sed -E 's/.*"([^"]+)".*/\1/'                                    # Pluck JSON value
}
# Usage
# $ get_latest_release "creationix/nvm"
# v0.31.4
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
HOST_NAME = 'localhost'
PORT_NUMBER = 9000
class MyHandler(BaseHTTPRequestHandler):
    def do_HEAD(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()  # terminate the header block so the response is complete
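A minimal entry point to actually serve requests with this handler (standard http.server usage, not part of the original excerpt):

if __name__ == '__main__':
    # Bind the handler to the configured host/port and serve until interrupted.
    httpd = HTTPServer((HOST_NAME, PORT_NUMBER), MyHandler)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        httpd.server_close()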
import logging
import sys
from urllib.parse import urljoin
import asyncio
import aiohttp
from aiohttp import web
TARGET_SERVER_BASE_URL = 'http://127.0.0.1:8888'
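These imports set up a reverse proxy in front of TARGET_SERVER_BASE_URL; a minimal sketch of how the handler might look (the route pattern, handler name, and proxy port are assumptions):

async def handle(request):
    # Forward the incoming request to the target server and relay the reply.
    target_url = urljoin(TARGET_SERVER_BASE_URL, request.match_info['path'])
    async with aiohttp.ClientSession() as session:
        async with session.request(request.method, target_url,
                                   headers=request.headers,
                                   data=await request.read()) as resp:
            return web.Response(status=resp.status, body=await resp.read())

app = web.Application()
app.router.add_route('*', '/{path:.*}', handle)
# web.run_app(app, port=8080)  # assumed local port for the proxy itself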
""" | |
When classifying upon a sequence usually we stack some LSTM returning sequences, | |
then one LSTM returning a point, then Dense with softmax activation. | |
Is it possible instead to give the last non-sequential LSTM a softmax activation? | |
The answer is yes. | |
In this example we have 3 sequential layers and one layer producing the final result. |
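A minimal sketch of that architecture (layer widths, class count, and input shape are assumed, not taken from the original):

import keras
from keras.layers import LSTM

num_classes, timesteps, features = 5, 20, 8  # assumed dimensions
model = keras.models.Sequential([
    LSTM(64, return_sequences=True, input_shape=(timesteps, features)),
    LSTM(64, return_sequences=True),
    LSTM(64, return_sequences=True),
    # the final LSTM collapses the sequence to a single vector; its softmax
    # activation replaces the usual trailing Dense classification layer
    LSTM(num_classes, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy')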
import multiprocessing
import pandas as pd
import numpy as np
def _apply_df(args):
    df, func, num, kwargs = args
    return num, df.apply(func, **kwargs)
def apply_by_multiprocessing(df, func, **kwargs):
    workers = kwargs.pop('workers')
    # Split into one chunk per worker, apply in parallel, reassemble in order.
    pool = multiprocessing.Pool(processes=workers)
    result = pool.map(_apply_df, [(d, func, i, kwargs)
                                  for i, d in enumerate(np.array_split(df, workers))])
    pool.close()
    result = sorted(result, key=lambda x: x[0])
    return pd.concat([chunk for _, chunk in result])
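Hypothetical usage (the frame and worker count are illustrative); on platforms that spawn rather than fork, this needs to run under an if __name__ == '__main__' guard:

df = pd.DataFrame({'a': range(100)})
squared = apply_by_multiprocessing(df, np.square, workers=4)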
import torch.utils.data
from torchvision import datasets, transforms
class PartialDataset(torch.utils.data.Dataset):
    def __init__(self, parent_ds, offset, length):
        self.parent_ds = parent_ds
        self.offset = offset
        self.length = length
        # A plain string is the idiomatic assert message (not an Exception object)
        assert len(parent_ds) >= offset + length, "Parent Dataset not long enough"
        super(PartialDataset, self).__init__()
    def __len__(self):
        return self.length
    def __getitem__(self, i):
        # Index into the parent dataset, shifted by this window's offset
        return self.parent_ds[i + self.offset]
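Hypothetical usage, carving a train/validation split out of MNIST with the torchvision imports above (the split sizes are illustrative):

mnist = datasets.MNIST('data', train=True, download=True,
                       transform=transforms.ToTensor())
train_ds = PartialDataset(mnist, 0, 55000)
val_ds = PartialDataset(mnist, 55000, 5000)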