Skip to content

Instantly share code, notes, and snippets.

// Unwrap each Tensor argument into its underlying impl, checking as we go
// that it lives on the CUDA backend with a Double scalar type.  The integer
// passed after the name (1..9) appears to be the 1-based argument position,
// presumably used in the error message on a failed check -- TODO confirm
// against checked_tensor_unwrap's declaration (not visible in this chunk).
auto grad_output_ = checked_tensor_unwrap(grad_output,"grad_output",1, false, Backend::CUDA, ScalarType::Double);
auto self_ = checked_tensor_unwrap(self,"self",2, false, Backend::CUDA, ScalarType::Double);
auto weight_ = checked_tensor_unwrap(weight,"weight",3, false, Backend::CUDA, ScalarType::Double);
// Validate that each geometry argument is an integer list of exactly 2
// elements (check_intlist<2>).  NOTE(review): names suggest a 2-D
// convolution-style backward kernel, but the enclosing function signature
// is outside this view -- confirm before relying on that.
auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
auto stride_ = check_intlist<2>(stride, "stride", 5);
auto padding_ = check_intlist<2>(padding, "padding", 6);
auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
// Scratch tensors, unwrapped with the same CUDA/Double checks as above.
auto columns_ = checked_tensor_unwrap(columns,"columns",8, false, Backend::CUDA, ScalarType::Double);
auto ones_ = checked_tensor_unwrap(ones,"ones",9, false, Backend::CUDA, ScalarType::Double);
auto grad_input_ = output_mask[0] ? c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(caffe2::TypeMeta::Make<double>(), 0, allocator(), true),CUDAT
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import scipy.cluster.hierarchy as spc
from matplotlib import cm as cm
# Read file into a Pandas dataframe
from pandas import DataFrame, read_csv
# Load the dataset; 'combined2.csv' must exist in the current working
# directory (its schema is not visible here -- presumably the merged CSV
# produced by an earlier step; verify against the script that writes it).
df = read_csv('combined2.csv')
commit 841d4b9c81b06178f83adc0ee3ff78fa4fc0ca41
Author: Edward Z. Yang <[email protected]>
Date: Wed Apr 24 11:10:07 2019 -0400
Install VS 2019 on all Windows images.
Signed-off-by: Edward Z. Yang <[email protected]>
diff --git a/packer-windows/jenkins-win2012r2.json b/packer-windows/jenkins-win2012r2.json
index ee3af65..b713f96 100644
commit 841d4b9c81b06178f83adc0ee3ff78fa4fc0ca41
Author: Edward Z. Yang <[email protected]>
Date: Wed Apr 24 11:10:07 2019 -0400
Install VS 2019 on all Windows images.
Signed-off-by: Edward Z. Yang <[email protected]>
diff --git a/packer-windows/jenkins-win2012r2.json b/packer-windows/jenkins-win2012r2.json
index ee3af65..b713f96 100644
@ezyang
ezyang / MODULES
Last active April 11, 2019 16:12
Classification of every file in PyTorch into a module
# autograd
/test/test_autograd.py
/torch/autograd/
/torch/csrc/autograd/
/torch/utils/hooks.py
# benchmarks
/benchmarks/
# binaries
@ezyang
ezyang / cmake.log
Created January 29, 2019 16:15
cmake log when openmp didn't get enabled
+ SYNC_COMMAND=cp
++ command -v rsync
+ '[' -x /bin/rsync ']'
+ SYNC_COMMAND='rsync -lptgoD'
+ CMAKE_COMMAND=cmake
++ command -v cmake3
+ [[ -x /bin/cmake3 ]]
++ command -v cmake
+ [[ -x /home/ezyang/Dev/pytorch-tmp-env/bin/cmake ]]
++ cmake --version
@ezyang
ezyang / torch.pyi
Created January 29, 2019 04:33
torch.pyi
# @generated from tools/autograd/templates/torch/__init__.pyi
from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload
from torch._six import inf
import builtins
# These identifiers are reexported from other modules. These modules
# are not mypy-clean yet, so in order to use this stub file usefully
# from mypy you will need to specify --follow-imports=silent.
@ezyang
ezyang / boxing.md
Last active December 21, 2018 15:59

STALLED: Boxing and wrappers

Background

An unboxed representation is a non-uniform representation that coincides with the “best” machine representation but cannot be operated polymorphically over, without generating code for every representation. The following function has an unboxed signature:

void f(Tensor input, float x);
#pragma once
#include <c10/macros/Macros.h>
// Select the device sqrt implementation.
// NOTE(review): the original condition was
//   #if (defined(__HIPCC__) || defined(__HIPCC__))
// i.e. the same macro tested twice -- almost certainly a hipify
// find/replace artifact (a __CUDACC__ test rewritten to __HIPCC__).
// Because the THH/ and ATen/native/hip/ headers below are HIP-only,
// collapsing to a single __HIPCC__ test preserves behavior exactly.
#if defined(__HIPCC__)
#include <THH/THHDeviceUtils.cuh>
#include <ATen/native/hip/DeviceSqrt.cuh>
#else
// Host / non-HIP compilation: fall back to the standard-library sqrt.
#include <cmath>
#define device_sqrt std::sqrt
#endif
#include <cstdint>
// Placeholder for asserts; ignore them for now
#define AT_ASSERT(cond, ...)
// ArrayRef (comes from LLVM, ATen uses it, we think it's pretty good)
//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure