Sam Gross (colesbury)

diff --git a/torch/csrc/autograd/functions/convolution.cpp b/torch/csrc/autograd/functions/convolution.cpp
index 465283c..991648c 100644
--- a/torch/csrc/autograd/functions/convolution.cpp
+++ b/torch/csrc/autograd/functions/convolution.cpp
@@ -578,6 +578,16 @@ auto ConvBackwardBackward::apply(const variable_list& grad_grad_inputs) -> variable_list
gI = Transpose(0, 1).apply({gIt})[0];
}
+ auto zeros_like = [](const Variable& var) -> std::shared_ptr<Variable> {
+ auto data = var.data->newTensor();
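The hunk above is cut off by the gist preview, but the visible lines suggest a local zeros_like helper: allocate a tensor of the same type as var.data, zero-fill it to var's size, and wrap the result in a fresh Variable. Purely as an illustration of that pattern, here is a self-contained sketch with toy stand-ins for Tensor and Variable (the real 2017-era autograd types differ):

// Toy stand-ins; newTensor() mimics "allocate an empty tensor of the same type".
#include <memory>
#include <vector>

struct Tensor {
  std::vector<float> values;
  std::shared_ptr<Tensor> newTensor() const { return std::make_shared<Tensor>(); }
};

struct Variable {
  std::shared_ptr<Tensor> data;
};

int main() {
  // The pattern suggested by the truncated hunk: new tensor like var.data,
  // zero-filled, wrapped as a new Variable.
  auto zeros_like = [](const Variable& var) -> std::shared_ptr<Variable> {
    auto data = var.data->newTensor();
    data->values.assign(var.data->values.size(), 0.0f);  // zero-fill to var's size
    return std::make_shared<Variable>(Variable{std::move(data)});
  };

  Variable v{std::make_shared<Tensor>(Tensor{{1.f, 2.f, 3.f}})};
  auto z = zeros_like(v);  // z->data->values == {0, 0, 0}
  (void)z;
}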
--- VariableType.cpp 2017-12-28 10:54:46.713957544 -0800
+++ ./torch/csrc/autograd/generated/VariableType.cpp 2017-12-28 11:39:57.633026747 -0800
@@ -5937,7 +5937,7 @@
}
return Tensor(std::move(ret));
}
-Tensor VariableType::elu_(Tensor & self, Scalar alpha, Scalar scale) const {
+Tensor & VariableType::elu_(Tensor & self, Scalar alpha, Scalar scale) const {
profiler::RecordFunction profiler("elu_");
auto& self_ = unpack(self, "self", 0);
--- CPUFloatType.cpp 2017-12-28 10:54:32.998921256 -0800
+++ ./torch/lib/build/aten/src/ATen/ATen/CPUFloatType.cpp 2017-12-28 11:30:40.333573856 -0800
@@ -4393,23 +4393,19 @@
grad_input_->maybeScalar(grad_output_->isScalar() && output_->isScalar());
return grad_input;
}
-Tensor CPUFloatType::elu_(Tensor & self, Scalar alpha, Scalar scale) const {
+Tensor & CPUFloatType::elu_(Tensor & self, Scalar alpha, Scalar scale) const {
auto self_ = checked_cast_tensor<CPUFloatTensor>(self.pImpl,"self",1, false);
auto alpha_ = alpha.toDouble();
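Both hunks above make the same signature fix: an in-place method such as elu_ should return Tensor & rather than a Tensor by value, so the caller gets back the very object it passed in instead of a freshly constructed handle. A minimal sketch of that convention with a toy tensor class (not ATen's real types):

#include <cmath>
#include <iostream>

// Toy tensor; real ATen tensors hold a shared implementation pointer instead.
struct Tensor {
  float value;

  // In-place op: mutates *this and returns a reference to it.
  Tensor& elu_(float alpha = 1.0f) {
    if (value < 0) value = alpha * (std::exp(value) - 1.0f);
    return *this;
  }
};

int main() {
  Tensor t{-1.0f};
  Tensor& same = t.elu_();  // returns the same object, not a copy
  std::cout << (&same == &t) << " " << t.value << "\n";  // 1 -0.632...
}

Returning *this also lets in-place calls chain, matching the usual PyTorch convention that a trailing underscore marks an operation that modifies its receiver.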
#include "Python.h"
#include "VariableType.h"
// generated from tools/autograd/templates/VariableType.cpp
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/autograd/function.h"
#include "torch/csrc/autograd/grad_mode.h"
#include "torch/csrc/autograd/saved_variable.h"
#include "torch/csrc/autograd/generated/Functions.h"
--- VariableType.cpp 2018-01-04 15:20:29.648778458 -0800
+++ ./torch/csrc/autograd/generated/VariableType.cpp 2018-01-04 15:21:05.167875765 -0800
@@ -188,18 +188,18 @@
return ret;
}
-static Variable as_variable(Tensor tensor) {
+static Tensor as_variable(Tensor tensor) {
return make_variable(std::move(tensor));
}
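This hunk narrows as_variable to return a plain Tensor even though make_variable builds a Variable. That is safe when all of the interesting state lives behind a shared implementation pointer, so "slicing" the wrapper down to its base type loses nothing. A toy illustration of that idea (the names below are stand-ins, not the real classes):

#include <iostream>
#include <memory>

struct TensorImpl {
  int data = 42;            // stand-in for real tensor storage
  bool is_variable = false;
};

struct Tensor {
  std::shared_ptr<TensorImpl> impl;
};

struct Variable : Tensor {};  // same layout; the extra state lives in the impl

// Analogous in spirit to as_variable(): builds a Variable but hands it back
// as a plain Tensor by value.
static Tensor make_variable_like(Tensor t) {
  Variable v;
  v.impl = std::move(t.impl);
  v.impl->is_variable = true;
  return v;  // slicing to Tensor loses nothing: the state is in the shared impl
}

int main() {
  Tensor t{std::make_shared<TensorImpl>()};
  Tensor wrapped = make_variable_like(t);
  std::cout << wrapped.impl->data << " " << wrapped.impl->is_variable << "\n";  // 42 1
}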
#include "Python.h"
#include "VariableType.h"
// generated from tools/autograd/templates/VariableType.cpp
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/autograd/function.h"
#include "torch/csrc/autograd/grad_mode.h"
#include "torch/csrc/autograd/saved_variable.h"
#include "torch/csrc/autograd/generated/Functions.h"
--- VariableType.cpp 2018-01-09 12:45:35.044936501 -0800
+++ ./torch/csrc/autograd/generated/VariableType.cpp 2018-01-09 12:45:57.906005558 -0800
@@ -289,41 +289,67 @@
}
}
-static void rebase_history(Tensor& tensor, std::shared_ptr<Function> grad_fn, int output_nr=0) {
- if (!tensor.defined()) {
- return;
+static void check_output_args(const char* name, TensorList tensors) {
#include "Python.h"
#include "VariableType.h"
// generated from tools/autograd/templates/VariableType.cpp
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/autograd/function.h"
#include "torch/csrc/autograd/grad_mode.h"
#include "torch/csrc/autograd/saved_variable.h"
#include "torch/csrc/autograd/generated/Functions.h"
// generated from tools/autograd/templates/python_torch_functions.cpp
// Python bindings for torch.* functions implemented through ATen.
//
// The functions are bound as static methods on a class
// torch._C._VariableFunctions which is also aliased as Variable._torch.
#include <Python.h>
#include "torch/csrc/Exceptions.h"
colesbury / - (created February 28, 2018 19:07)
diff --git a/2.ASimpleNeuralNetwork/numpy_like_fizbuz.py b/2.ASimpleNeuralNetwork/numpy_like_fizbuz.py
index 62fa394..b6a5f5f 100644
--- a/2.ASimpleNeuralNetwork/numpy_like_fizbuz.py
+++ b/2.ASimpleNeuralNetwork/numpy_like_fizbuz.py
@@ -44,14 +44,18 @@ y = torch.from_numpy(trY).type(dtype)
print(x.grad, x.grad_fn, x)
# None, None, [torch.FloatTensor of size 900x10]
-w1 = torch.randn(input_size, hidden_units, requires_grad=True).type(dtype)
+w1 = torch.randn(input_size, hidden_units).type(dtype)