Last active December 28, 2017 19:44
--- VariableType.cpp 2017-12-28 10:54:46.713957544 -0800
+++ ./torch/csrc/autograd/generated/VariableType.cpp 2017-12-28 11:39:57.633026747 -0800
@@ -5937,7 +5937,7 @@
}
return Tensor(std::move(ret));
}
-Tensor VariableType::elu_(Tensor & self, Scalar alpha, Scalar scale) const {
+Tensor & VariableType::elu_(Tensor & self, Scalar alpha, Scalar scale) const {
profiler::RecordFunction profiler("elu_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -6087,7 +6087,7 @@
}
return Tensor(std::move(ret));
}
-Tensor VariableType::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const {
+Tensor & VariableType::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const {
profiler::RecordFunction profiler("hardtanh_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -6154,7 +6154,7 @@
}
return Tensor(std::move(ret));
}
-Tensor VariableType::leaky_relu_(Tensor & self, Scalar negative_slope) const {
+Tensor & VariableType::leaky_relu_(Tensor & self, Scalar negative_slope) const {
profiler::RecordFunction profiler("leaky_relu_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -6308,80 +6308,6 @@
}
return std::tuple<Tensor,Tensor>(std::move(ret));
}
-Tensor VariableType::rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
- profiler::RecordFunction profiler("rrelu");
- auto& self_ = unpack(self, "self", 0);
- auto noise = tensor();
- auto& noise_ = static_cast<VariableImpl*>(noise.get())->data;
- std::shared_ptr<RreluBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<RreluBackward0>();
- grad_fn->next_functions = compute_next_functions({ self });
- grad_fn->lower = lower;
- grad_fn->upper = upper;
- grad_fn->training = training;
- grad_fn->noise_ = SavedVariable(noise, false);
- }
- auto ret = as_variable(baseType->rrelu_forward(self_, lower, upper, training, generator, noise_));
- set_history(ret, grad_fn);
- if (grad_fn) {
- auto& output = ret;
- grad_fn->output_ = SavedVariable(output, true);
- }
- return Tensor(std::move(ret));
-}
-Tensor VariableType::rrelu_backward(const Tensor & grad_output, const Tensor & self, Scalar lower, Scalar upper, bool training, const Tensor & noise) const {
- profiler::RecordFunction profiler("rrelu_backward");
- auto& grad_output_ = unpack(grad_output, "grad_output", 0);
- auto& self_ = unpack(self, "self", 1);
- auto& noise_ = unpack(noise, "noise", 5);
- check_no_requires_grad(noise, "noise");
- std::shared_ptr<RreluBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
- grad_fn = std::make_shared<RreluBackwardBackward>();
- grad_fn->next_functions = compute_next_functions({ grad_output, self });
- grad_fn->self_ = SavedVariable(self, false);
- grad_fn->lower = lower;
- grad_fn->upper = upper;
- grad_fn->training = training;
- grad_fn->noise_ = SavedVariable(noise, false);
- }
- auto ret = as_variable(baseType->rrelu_backward(grad_output_, self_, lower, upper, training, noise_));
- set_history(ret, grad_fn);
- if (jit::tracer::isTracing( grad_output, self, noise )) {
- jit::Node *n = jit::tracer::recordTrace( "rrelu_backward", { grad_output, self, noise }, { ret } );
- setattr(n, jit::stringToSymbol("lower"), lower);
- setattr(n, jit::stringToSymbol("upper"), upper);
- setattr(n, jit::stringToSymbol("training"), training);
- }
- return Tensor(std::move(ret));
-}
-Tensor VariableType::rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
- profiler::RecordFunction profiler("rrelu_");
- auto& self_ = unpack(self, "self", 0);
- auto noise = tensor();
- auto& noise_ = static_cast<VariableImpl*>(noise.get())->data;
- check_inplace(self);
- std::shared_ptr<RreluBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<RreluBackward1>();
- grad_fn->next_functions = compute_next_functions({ self });
- grad_fn->lower = lower;
- grad_fn->upper = upper;
- grad_fn->training = training;
- grad_fn->noise_ = SavedVariable(noise, false);
- }
- baseType->rrelu_forward_(self_, lower, upper, training, generator, noise_);
- increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
- if (grad_fn) {
- grad_fn->output_ = SavedVariable(self, true);
- }
- return self;
-}
Tensor VariableType::softmax(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("softmax");
auto& self_ = unpack(self, "self", 0);
@@ -6517,6 +6443,80 @@
}
return Tensor(std::move(ret));
}
+Tensor VariableType::rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ profiler::RecordFunction profiler("rrelu_with_noise");
+ auto& self_ = unpack(self, "self", 0);
+ auto& noise_ = unpack(noise, "noise", 1);
+ check_no_requires_grad(noise, "noise");
+ std::shared_ptr<RreluWithNoiseBackward0> grad_fn;
+ auto requires_grad = compute_requires_grad({ self });
+ if (requires_grad) {
+ grad_fn = std::make_shared<RreluWithNoiseBackward0>();
+ grad_fn->next_functions = compute_next_functions({ self });
+ grad_fn->noise_ = SavedVariable(noise, false);
+ grad_fn->lower = lower;
+ grad_fn->upper = upper;
+ grad_fn->training = training;
+ }
+ auto ret = as_variable(baseType->rrelu_with_noise_forward(self_, noise_, lower, upper, training, generator));
+ set_history(ret, grad_fn);
+ if (grad_fn) {
+ auto& output = ret;
+ grad_fn->output_ = SavedVariable(output, true);
+ }
+ return Tensor(std::move(ret));
+}
+Tensor VariableType::rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const {
+ profiler::RecordFunction profiler("rrelu_with_noise_backward");
+ auto& grad_output_ = unpack(grad_output, "grad_output", 0);
+ auto& self_ = unpack(self, "self", 1);
+ auto& noise_ = unpack(noise, "noise", 2);
+ check_no_requires_grad(noise, "noise");
+ std::shared_ptr<RreluWithNoiseBackwardBackward> grad_fn;
+ auto requires_grad = compute_requires_grad({ grad_output, self });
+ if (requires_grad) {
+ grad_fn = std::make_shared<RreluWithNoiseBackwardBackward>();
+ grad_fn->next_functions = compute_next_functions({ grad_output, self });
+ grad_fn->self_ = SavedVariable(self, false);
+ grad_fn->noise_ = SavedVariable(noise, false);
+ grad_fn->lower = lower;
+ grad_fn->upper = upper;
+ grad_fn->training = training;
+ }
+ auto ret = as_variable(baseType->rrelu_with_noise_backward(grad_output_, self_, noise_, lower, upper, training));
+ set_history(ret, grad_fn);
+ if (jit::tracer::isTracing( grad_output, self, noise )) {
+ jit::Node *n = jit::tracer::recordTrace( "rrelu_with_noise_backward", { grad_output, self, noise }, { ret } );
+ setattr(n, jit::stringToSymbol("lower"), lower);
+ setattr(n, jit::stringToSymbol("upper"), upper);
+ setattr(n, jit::stringToSymbol("training"), training);
+ }
+ return Tensor(std::move(ret));
+}
+Tensor & VariableType::rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ profiler::RecordFunction profiler("rrelu_with_noise_");
+ auto& self_ = unpack(self, "self", 0);
+ auto& noise_ = unpack(noise, "noise", 1);
+ check_inplace(self);
+ check_no_requires_grad(noise, "noise");
+ std::shared_ptr<RreluWithNoiseBackward1> grad_fn;
+ auto requires_grad = compute_requires_grad({ self });
+ if (requires_grad) {
+ grad_fn = std::make_shared<RreluWithNoiseBackward1>();
+ grad_fn->next_functions = compute_next_functions({ self });
+ grad_fn->noise_ = SavedVariable(noise, false);
+ grad_fn->lower = lower;
+ grad_fn->upper = upper;
+ grad_fn->training = training;
+ }
+ baseType->rrelu_with_noise_forward_(self_, noise_, lower, upper, training, generator);
+ increment_version(self);
+ rebase_history(static_cast<Variable&>(self), grad_fn);
+ if (grad_fn) {
+ grad_fn->output_ = SavedVariable(self, true);
+ }
+ return self;
+}
Tensor VariableType::threshold(const Tensor & self, Scalar threshold, Scalar value) const {
profiler::RecordFunction profiler("threshold");
auto& self_ = unpack(self, "self", 0);
@@ -6560,7 +6560,7 @@
}
return Tensor(std::move(ret));
}
-Tensor VariableType::threshold_(Tensor & self, Scalar threshold, Scalar value) const {
+Tensor & VariableType::threshold_(Tensor & self, Scalar threshold, Scalar value) const {
profiler::RecordFunction profiler("threshold_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -8932,6 +8932,16 @@
}
return Tensor(std::move(ret));
}
+Tensor VariableType::rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ profiler::RecordFunction profiler("rrelu");
+ auto ret = Type::rrelu(self, lower, upper, training, generator);
+ return Tensor(std::move(ret));
+}
+Tensor & VariableType::rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
+ profiler::RecordFunction profiler("rrelu_");
+ auto ret = Type::rrelu_(self, lower, upper, training, generator);
+ return self;
+}
Tensor VariableType::select(const Tensor & self, int64_t dim, int64_t index) const {
profiler::RecordFunction profiler("select");
auto ret = Type::select(self, dim, index);
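Net effect of the patch: the in-place activation entries (elu_, hardtanh_, leaky_relu_, threshold_, rrelu_) now return Tensor & instead of Tensor, the old derived rrelu / rrelu_ / rrelu_backward functions that allocated the noise tensor internally are replaced by rrelu_with_noise variants taking the noise buffer as an explicit argument, and rrelu / rrelu_ become thin wrappers over the base Type fallback. As a hedged sketch of what that fallback would do (the generated Type implementation is not part of this diff, so the noise allocation and exact calls below are assumptions, not the actual code):

Tensor Type::rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
  // Assumed: allocate a scratch noise buffer and forward to the explicit-noise variant.
  auto noise = tensor(self.sizes());
  return rrelu_with_noise(self, noise, lower, upper, training, generator);
}
Tensor & Type::rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
  // Assumed: same idea for the in-place variant, returning self by reference.
  auto noise = tensor(self.sizes());
  return rrelu_with_noise_(self, noise, lower, upper, training, generator);
}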