# pip3 install tornado pubnub python-dateutil
# python3 -u sfd.py | tee sfd.log
from multiprocessing import Process, Value, Lock, Event
from datetime import datetime
import dateutil.parser
from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNStatusCategory
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub_tornado import PubNubTornado
from pubnub.pnconfiguration import PNReconnectionPolicy
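# The preview cuts off after the imports. Below is a minimal sketch of how they are
# typically wired together in a PubNub/Tornado subscriber; the subscribe key, channel
# name, and the "timestamp" field in the payload are placeholders, not sfd.py's logic.
import tornado.ioloop

pnconfig = PNConfiguration()
pnconfig.subscribe_key = "demo"                          # placeholder key
pnconfig.reconnect_policy = PNReconnectionPolicy.LINEAR
pubnub = PubNubTornado(pnconfig)

class FeedListener(SubscribeCallback):
    def status(self, pubnub, status):
        if status.category == PNStatusCategory.PNConnectedCategory:
            print(datetime.now(), "connected")

    def presence(self, pubnub, presence):
        pass

    def message(self, pubnub, message):
        payload = message.message
        # assumed payload shape: {"timestamp": "...", ...}
        ts = dateutil.parser.parse(payload["timestamp"])
        print(ts, payload)

pubnub.add_listener(FeedListener())
pubnub.subscribe().channels("my_channel").execute()      # placeholder channel
tornado.ioloop.IOLoop.current().start()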
/*
* Experimental plugin that customizes the play style of L4D2 special infected bots.
*
* Only 2/3 of the special infected bots that spawn are modified.
*
* The bots are controlled by simulating key input,
* so the underlying bot system stays at its defaults!!
*
* In addons/sourcemod/scripting
require 'nn'
local N = 1000
local FEAT = 64
local S = 2
-- override the default initialization: weights uniform in [0, 1], zero bias
function nn.SpatialConvolution:reset()
   self.weight:uniform(0, 1)
   self.bias:zero()
end
require 'cutorch'
require 'cunn'
require 'cudnn'
require 'sys'
-- WINOGRAD benchmark
-- required: cuDNN v5, cudnn.torch R5 branch
function create_model(ch) -- simple 3x3 conv model
   local model = nn.Sequential()
#include <xmmintrin.h>
/* input and weight must be 16-byte aligned, e.g. __attribute__((aligned(16))) float input[n], weight[n]; */
float dot_sse(float *input, float *weight, int n)
{
    __attribute__((aligned(16))) float mm[4] = {0};
    __m128 x, u;
    int pk_lp = (n & 0xfffffffc); /* round n down to a multiple of 4 */
    int i;
    float sum = 0;
    u = _mm_setzero_ps();
    for (i = 0; i < pk_lp; i += 4) { /* 4-wide multiply-accumulate */
        x = _mm_mul_ps(_mm_load_ps(&input[i]), _mm_load_ps(&weight[i]));
        u = _mm_add_ps(u, x);
    }
    _mm_store_ps(mm, u); /* reduce the 4 partial sums */
    sum = mm[0] + mm[1] + mm[2] + mm[3];
    for (i = pk_lp; i < n; i++) /* scalar tail for the n % 4 leftovers */
        sum += input[i] * weight[i];
    return sum;
}
name: "srcnn"
layer {
name: "input"
type: "Input"
top: "input"
input_param { shape: { dim: 1 dim: 3 dim: 32 dim: 32 } }
}
layer {
name: "conv1_layer"
type: "Convolution"
require 'nn'
torch.setdefaulttensortype("torch.FloatTensor")

-- 1 input plane, 1 output plane, 2x2 kernel, stride 2, no padding
upconv = nn.SpatialFullConvolution(1, 1, 2, 2, 2, 2, 0, 0)
input = torch.Tensor({{1, 10}, {100, 1000}}):reshape(1, 1, 2, 2) -- (batch_size, input_dim, height, width)
weight = torch.Tensor({{1, 2}, {3, 4}}) -- 1x1x2x2 filter
bias = torch.Tensor({0.5})
upconv.weight:copy(weight)
upconv.bias:copy(bias)

-- 1x1x4x4 output: with stride == kernel size, each input pixel is expanded
-- into its own 2x2 block of the output, then the bias is added
output = upconv:forward(input)
print(output)
import numpy as np
import scipy as sp
import xgboost as xgb
from hyperopt import hp, fmin, tpe
from sklearn import datasets
from sklearn import cross_validation
from sklearn.metrics import confusion_matrix
np.random.seed(71)
iris = datasets.load_iris()
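# The preview stops after loading iris. A minimal sketch of an assumed continuation:
# tune two XGBoost hyperparameters on iris with hyperopt/TPE. The search space,
# cross-validation settings, and max_evals are illustrative, not the original script's.
def objective(params):
    clf = xgb.XGBClassifier(
        max_depth=int(params["max_depth"]),
        learning_rate=params["learning_rate"],
        n_estimators=100,
        seed=71,
    )
    scores = cross_validation.cross_val_score(
        clf, iris.data, iris.target, cv=5, scoring="accuracy"
    )
    return 1.0 - scores.mean()  # fmin minimizes, so return the error rate

space = {
    "max_depth": hp.quniform("max_depth", 2, 10, 1),
    "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.3)),
}
best = fmin(objective, space, algo=tpe.suggest, max_evals=50)
print(best)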
This file has been truncated, but you can view the full file.
[{"bias":[0.028505884110928,-0.011944146826863,-0.031148914247751,-0.0031341155990958,0.0092963706701994,0.008396684192121,-0.052017644047737,-0.053563583642244,0.013980533927679,-0.0083225620910525,0.019794654101133,0.0097693046554923,0.019742390140891,-0.021908029913902,-0.04789212718606,-0.058312103152275,0.00098388339392841,-0.024875573813915,-0.00019801953749266,-0.0051305224187672,-0.035766143351793,-0.018864262849092,0.03208315372467,0.0075986147858202,-0.015769049525261,-0.043182034045458,-0.018727220594883,0.0072148125618696,-0.054116483777761,0.0087907928973436,-0.0050874976441264,0.0030902056023479],"kW":3,"nInputPlane":3,"weight":[[[[0.027103105559945,0.077817507088184,0.088963352143764],[0.050164110958576,-0.047492902725935,0.011296784505248],[-0.011886465363204,-0.13131293654442,-0.035226672887802]],[[0.044841166585684,-0.014278756454587,0.038773007690907],[0.19783321022987,-0.027543751522899,-0.16893927752972],[-0.010402355343103,-0.020798994228244,-0.071524895727634]],[[0.068569131195545,-0.10