
@colesbury
Created January 4, 2018 23:25
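Note on the pattern below: every hunk in this diff follows the same generated-wrapper template. Each VariableType override records a profiler event, unpacks its Tensor arguments, optionally builds a grad_fn (an Error node when the derivative is not implemented), dispatches to baseType, then updates the version counter, autograd history, and JIT trace. The following is a schematic sketch of that in-place template, assembled only from helpers that appear in this diff; the method name example_ is hypothetical and the snippet is not compilable outside the generated file.

// Schematic sketch only: mirrors the generated in-place wrappers in this diff.
// All helpers (unpack, check_inplace, compute_requires_grad, rebase_history, ...)
// are the ones defined inside VariableType.cpp; nothing here is public API,
// and example_ is a made-up operation used purely for illustration.
Tensor & VariableType::example_(Tensor & self, Scalar value) const {
  profiler::RecordFunction profiler("example_");   // profiling scope for this op
  auto& self_ = unpack(self, "self", 0);           // unwrap the Variable to the underlying tensor
  check_inplace(self);                             // reject illegal in-place modification
  std::shared_ptr<Error> grad_fn;
  if (compute_requires_grad({ self })) {           // build a backward node only when needed
    grad_fn = std::make_shared<Error>("the derivative for example_ is not implemented");
    grad_fn->next_functions = compute_next_functions({ self });
  }
  baseType->example_(self_, value);                // dispatch to the wrapped base type
  increment_version(self);                         // bump version counter for in-place ops
  rebase_history(self, grad_fn);                   // attach the new grad_fn to self
  if (jit::tracer::isTracing( self )) {            // record the op in the JIT trace
    jit::Node *n = jit::tracer::recordTrace( "example", { self }, { self } );
    setattr(n, jit::stringToSymbol("value"), value);
  }
  return self;
}

The diff itself mostly converts these helpers from Variable-typed to Tensor-typed signatures (casting to Variable& internally where autograd metadata is touched) and fills in previously unimplemented overloads with the Error grad_fn pattern above.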
--- VariableType.cpp 2018-01-04 15:20:29.648778458 -0800
+++ ./torch/csrc/autograd/generated/VariableType.cpp 2018-01-04 15:21:05.167875765 -0800
@@ -188,18 +188,18 @@
return ret;
}
-static Variable as_variable(Tensor tensor) {
+static Tensor as_variable(Tensor tensor) {
return make_variable(std::move(tensor));
}
-static std::tuple<Variable, Variable>
+static std::tuple<Tensor, Tensor>
as_variable(std::tuple<Tensor, Tensor> tensors) {
return std::make_tuple<>(
make_variable(std::move(std::get<0>(tensors))),
make_variable(std::move(std::get<1>(tensors))));
}
-static std::tuple<Variable, Variable, Variable>
+static std::tuple<Tensor, Tensor, Tensor>
as_variable(std::tuple<Tensor, Tensor, Tensor> tensors) {
return std::make_tuple<>(
make_variable(std::move(std::get<0>(tensors))),
@@ -207,7 +207,7 @@
make_variable(std::move(std::get<2>(tensors))));
}
-static std::tuple<Variable, Variable, Variable, Variable>
+static std::tuple<Tensor, Tensor, Tensor, Tensor>
as_variable(std::tuple<Tensor, Tensor, Tensor, Tensor> tensors) {
return std::make_tuple<>(
make_variable(std::move(std::get<0>(tensors))),
@@ -216,19 +216,20 @@
make_variable(std::move(std::get<3>(tensors))));
}
-static std::vector<Variable> as_variable(TensorList tl) {
- std::vector<Variable> variables;
+static std::vector<Tensor> as_variable(TensorList tl) {
+ std::vector<Tensor> variables;
for (auto& t : tl) {
variables.emplace_back(make_variable(std::move(t)));
}
return variables;
}
-static Variable as_view(Variable base, Tensor tensor) {
- if (base.is_view()) {
- base = base.base();
+static Tensor as_view(const Tensor & base, Tensor tensor) {
+ auto base_var = Variable(base);
+ if (base_var.is_view()) {
+ base_var = base_var.base();
}
- return make_variable_view(std::move(base), std::move(tensor));
+ return make_variable_view(std::move(base_var), std::move(tensor));
}
static void ensure_no_aten_scalars(Tensor & data) {
@@ -254,17 +255,6 @@
using TensorRef = std::reference_wrapper<const Tensor>;
using TensorRefList = std::initializer_list<TensorRef>;
-// ArrayRef is not covariant, which means there is no
-// implicit conversion between TensorList (aka ArrayRef<Tensor>)
-// and ArrayRef<Variable>. What we do instead is manually
-// construct a variable_list, which itself is implicitly convertible
-// into an ArrayRef<Variable> (but don't return an ArrayRef<Variable>;
-// ArrayRef is non-owning!)
-static variable_list cast_tensor_list(const TensorList& tensors) {
- // TODO: Eliminate the intermediate vector allocation
- return variable_list(tensors.begin(), tensors.end());
-}
-
static bool compute_requires_grad(const TensorRefList& tensors) {
return computes_grad_tmpl(tensors);
}
@@ -299,10 +289,11 @@
}
}
-static void rebase_history(Variable& var, std::shared_ptr<Function> grad_fn, int output_nr=0) {
- if (!var.defined()) {
+static void rebase_history(Tensor& tensor, std::shared_ptr<Function> grad_fn, int output_nr=0) {
+ if (!tensor.defined()) {
return;
}
+ auto& var = static_cast<Variable&>(tensor);
if (grad_fn) {
grad_fn->num_inputs = 1;
var.rebase_history(output_nr, std::move(grad_fn));
@@ -311,7 +302,8 @@
// var must be the only differentiable output of the function. Use the ArrayRef
// overload for functions with multiple differentiable outputs.
-static void set_history(Variable& var, std::shared_ptr<Function> grad_fn, int output_nr=0) {
+static void set_history(Tensor& t, std::shared_ptr<Function> grad_fn, int output_nr=0) {
+ auto& var = static_cast<Variable&>(t);
if (grad_fn) {
grad_fn->num_inputs = 1;
var.get()->output_nr = output_nr;
@@ -319,13 +311,14 @@
}
}
-static void set_history(at::ArrayRef<Variable> vl, std::shared_ptr<Function> grad_fn) {
+static void set_history(at::ArrayRef<Tensor> tl, std::shared_ptr<Function> grad_fn) {
if (grad_fn) {
- grad_fn->num_inputs = vl.size();
+ grad_fn->num_inputs = tl.size();
int64_t output_nr = 0;
- for (auto& var : vl) {
- if (!var.defined()) continue;
+ for (auto& t : tl) {
+ if (!t.defined()) continue;
// TODO: combine this with the Variable construction
+ auto& var = static_cast<const Variable&>(t);
var.get()->output_nr = output_nr;
var.get()->_grad_fn = grad_fn;
output_nr++;
@@ -333,8 +326,9 @@
}
}
-static variable_list flatten(const TensorList& tensors) {
- return cast_tensor_list(tensors);
+static at::ArrayRef<Variable> flatten(TensorList tensors) {
+ auto data = static_cast<const Variable*>(tensors.data());
+ return at::ArrayRef<Variable>(data, tensors.size());
}
static variable_list flatten(const Tensor& x, const TensorList& y) {
@@ -345,7 +339,7 @@
return r;
}
-static variable_list flatten(const Tensor& x, const TensorList& y, const Tensor& z) {
+static variable_list flatten(const Tensor& x, TensorList y, const Tensor& z) {
std::vector<Variable> r;
r.reserve(2 + y.size());
r.emplace_back(x);
@@ -354,14 +348,6 @@
return r;
}
-static std::vector<Tensor> as_tensor_list(std::vector<Variable> &vars) {
- std::vector<Tensor> tensors;
- for (auto& v : vars) {
- tensors.emplace_back(std::move(v));
- }
- return tensors;
-}
-
static void increment_version(const Tensor & t) {
auto& var = static_cast<const Variable&>(t);
var.version_counter().increment();
@@ -424,49 +410,111 @@
int64_t VariableType::storage_offset(const Tensor & self) const {
auto& self_ = unpack(self, "self", 0);
- return baseType->storage_offset(self_);
+ auto result = baseType->storage_offset(self_);
+ return result;
}
Tensor VariableType::zeros(IntList size) const {
- return as_variable(baseType->zeros(size));
+ profiler::RecordFunction profiler("zeros");
+ auto result = as_variable(baseType->zeros(size));
+ return result;
}
Tensor VariableType::zeros_like(const Tensor & input) const {
+ profiler::RecordFunction profiler("zeros_like");
auto& input_ = unpack(input, "input", 0);
- return as_variable(baseType->zeros_like(input_));
+ auto result = as_variable(baseType->zeros_like(input_));
+ if (jit::tracer::isTracing( input )) {
+ jit::Node *n = jit::tracer::recordTrace( "zeros_like", { input }, { result } );
+ (void)n;
+ }
+ return result;
}
Tensor VariableType::ones(IntList size) const {
- return as_variable(baseType->ones(size));
+ profiler::RecordFunction profiler("ones");
+ auto result = as_variable(baseType->ones(size));
+ return result;
}
Tensor VariableType::ones_like(const Tensor & input) const {
+ profiler::RecordFunction profiler("ones_like");
auto& input_ = unpack(input, "input", 0);
- return as_variable(baseType->ones_like(input_));
+ auto result = as_variable(baseType->ones_like(input_));
+ if (jit::tracer::isTracing( input )) {
+ jit::Node *n = jit::tracer::recordTrace( "ones_like", { input }, { result } );
+ (void)n;
+ }
+ return result;
}
int64_t VariableType::numel(const Tensor & self) const {
auto& self_ = unpack(self, "self", 0);
- return baseType->numel(self_);
+ auto result = baseType->numel(self_);
+ return result;
}
Tensor & VariableType::set_(Tensor & self, Storage & storage) const {
+ profiler::RecordFunction profiler("set_");
auto& self_ = unpack(self, "self", 0);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for set_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
baseType->set_(self_, storage);
increment_version(self);
+ rebase_history(self, grad_fn);
return self;
}
Tensor & VariableType::set_(Tensor & self, Storage & sourceStorage, int64_t storage_offset, IntList size, IntList stride) const {
+ profiler::RecordFunction profiler("set_");
auto& self_ = unpack(self, "self", 0);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for set_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
baseType->set_(self_, sourceStorage, storage_offset, size, stride);
increment_version(self);
+ rebase_history(self, grad_fn);
return self;
}
Tensor & VariableType::set_(Tensor & self, const Tensor & source) const {
+ profiler::RecordFunction profiler("set_");
auto& self_ = unpack(self, "self", 0);
auto& source_ = unpack(source, "source", 1);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self, source })) {
+ grad_fn = std::make_shared<Error>("the derivative for set_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self, source });
+
+ }
baseType->set_(self_, source_);
increment_version(self);
+ rebase_history(self, grad_fn);
+ if (jit::tracer::isTracing( self, source )) {
+ jit::Node *n = jit::tracer::recordTrace( "set", { self, source }, { self } );
+ (void)n;
+ }
return self;
}
Tensor & VariableType::set_(Tensor & self) const {
+ profiler::RecordFunction profiler("set_");
auto& self_ = unpack(self, "self", 0);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for set_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
baseType->set_(self_);
increment_version(self);
+ rebase_history(self, grad_fn);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "set", { self }, { self } );
+ (void)n;
+ }
return self;
}
Tensor & VariableType::fill_(Tensor & self, Scalar value) const {
@@ -474,10 +522,10 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<FillBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<FillBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->fill_(self_, value);
increment_version(self);
@@ -481,7 +529,7 @@
}
baseType->fill_(self_, value);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "fill", { self }, { self } );
setattr(n, jit::stringToSymbol("value"), value);
@@ -489,16 +537,35 @@
return self;
}
Tensor & VariableType::fill_(Tensor & self, const Tensor & value) const {
- throw std::runtime_error("VariableType::fill_ NYI");
+ profiler::RecordFunction profiler("fill_");
+ auto& self_ = unpack(self, "self", 0);
+ auto& value_ = unpack(value, "value", 1);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self, value })) {
+ grad_fn = std::make_shared<Error>("the derivative for fill_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self, value });
+
+ }
+ baseType->fill_(self_, value_);
+ increment_version(self);
+ rebase_history(self, grad_fn);
+ if (jit::tracer::isTracing( self, value )) {
+ jit::Node *n = jit::tracer::recordTrace( "fill", { self, value }, { self } );
+ (void)n;
+ }
+ return self;
}
bool VariableType::is_contiguous(const Tensor & self) const {
auto& self_ = unpack(self, "self", 0);
- return baseType->is_contiguous(self_);
+ auto result = baseType->is_contiguous(self_);
+ return result;
}
bool VariableType::is_set_to(const Tensor & self, const Tensor & tensor) const {
auto& self_ = unpack(self, "self", 0);
auto& tensor_ = unpack(tensor, "tensor", 1);
- return baseType->is_set_to(self_, tensor_);
+ auto result = baseType->is_set_to(self_, tensor_);
+ return result;
}
Tensor & VariableType::s_masked_fill_(Tensor & self, const Tensor & mask, Scalar value) const {
profiler::RecordFunction profiler("masked_fill_");
@@ -506,15 +573,14 @@
auto& mask_ = unpack_byte(mask, "mask", 1);
check_inplace(self);
std::shared_ptr<MaskedFillBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MaskedFillBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->mask_ = SavedVariable(mask, false);
}
baseType->s_masked_fill_(self_, mask_, value);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, mask )) {
jit::Node *n = jit::tracer::recordTrace( "masked_fill", { self, mask }, { self } );
setattr(n, jit::stringToSymbol("value"), value);
@@ -522,7 +588,25 @@
return self;
}
Tensor & VariableType::s_masked_fill_(Tensor & self, const Tensor & mask, const Tensor & value) const {
- throw std::runtime_error("VariableType::masked_fill_ NYI");
+ profiler::RecordFunction profiler("masked_fill_");
+ auto& self_ = unpack(self, "self", 0);
+ auto& mask_ = unpack_byte(mask, "mask", 1);
+ auto& value_ = unpack(value, "value", 2);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self, mask, value })) {
+ grad_fn = std::make_shared<Error>("the derivative for masked_fill_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self, mask, value });
+
+ }
+ baseType->s_masked_fill_(self_, mask_, value_);
+ increment_version(self);
+ rebase_history(self, grad_fn);
+ if (jit::tracer::isTracing( self, mask, value )) {
+ jit::Node *n = jit::tracer::recordTrace( "masked_fill", { self, mask, value }, { self } );
+ (void)n;
+ }
+ return self;
}
Tensor & VariableType::s_masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) const {
profiler::RecordFunction profiler("masked_scatter_");
@@ -531,8 +615,7 @@
auto& source_ = unpack(source, "source", 2);
check_inplace(self);
std::shared_ptr<MaskedScatterBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, source });
- if (requires_grad) {
+ if (compute_requires_grad({ self, source })) {
grad_fn = std::make_shared<MaskedScatterBackward>();
grad_fn->next_functions = compute_next_functions({ self, source });
grad_fn->mask_ = SavedVariable(mask, false);
@@ -540,7 +623,7 @@
}
baseType->s_masked_scatter_(self_, mask_, source_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, mask, source )) {
jit::Node *n = jit::tracer::recordTrace( "masked_scatter", { self, mask, source }, { self } );
(void)n;
@@ -552,48 +635,45 @@
auto& self_ = unpack(self, "self", 0);
auto& mask_ = unpack_byte(mask, "mask", 1);
std::shared_ptr<MaskedSelectBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MaskedSelectBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_info = self;
grad_fn->mask_ = SavedVariable(mask, false);
}
- auto ret = as_variable(baseType->s_masked_select(self_, mask_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_masked_select(self_, mask_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, mask )) {
- jit::Node *n = jit::tracer::recordTrace( "masked_select", { self, mask }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "masked_select", { self, mask }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::transpose(const Tensor & self, int64_t dim0, int64_t dim1) const {
profiler::RecordFunction profiler("transpose");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TransposeBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TransposeBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->dim0 = dim0;
grad_fn->dim1 = dim1;
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->transpose(self_, dim0, dim1));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->transpose(self_, dim0, dim1));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "transpose", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "transpose", { self }, { result } );
setattr(n, jit::stringToSymbol("dim0"), dim0);
setattr(n, jit::stringToSymbol("dim1"), dim1);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::transpose_(Tensor & self, int64_t dim0, int64_t dim1) const {
profiler::RecordFunction profiler("transpose_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<TransposeBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TransposeBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->dim0 = dim0;
@@ -602,7 +682,7 @@
baseType->transpose_(self_, dim0, dim1);
ensure_no_aten_scalars(self);
increment_version(self);
- set_history(static_cast<Variable&>(self), grad_fn);
+ set_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "transpose", { self }, { self } );
setattr(n, jit::stringToSymbol("dim0"), dim0);
@@ -614,28 +694,28 @@
profiler::RecordFunction profiler("t");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->t(self_));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->t(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "t", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "t", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::t_(Tensor & self) const {
profiler::RecordFunction profiler("t_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<TBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->t_(self_);
ensure_no_aten_scalars(self);
@@ -640,7 +720,7 @@
baseType->t_(self_);
ensure_no_aten_scalars(self);
increment_version(self);
- set_history(static_cast<Variable&>(self), grad_fn);
+ set_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "t", { self }, { self } );
(void)n;
@@ -650,75 +730,66 @@
Tensor VariableType::nonzero(const Tensor & self) const {
profiler::RecordFunction profiler("nonzero");
auto& self_ = unpack(self, "self", 0);
- std::shared_ptr<NonzeroBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<NonzeroBackward>();
- grad_fn->next_functions = compute_next_functions({ self });
- }
- auto ret = as_variable(baseType->nonzero(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->nonzero(self_));
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "nonzero", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "nonzero", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::clone(const Tensor & self) const {
profiler::RecordFunction profiler("clone");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CloneBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CloneBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->clone(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->clone(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "clone", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "clone", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::view(const Tensor & self, IntList size) const {
profiler::RecordFunction profiler("view");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ViewBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ViewBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->view(self_, size));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->view(self_, size));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "view", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "view", { self }, { result } );
setattr(n, jit::stringToSymbol("size"), size);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::index_select(const Tensor & self, int64_t dim, const Tensor & index) const {
profiler::RecordFunction profiler("index_select");
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack_long(index, "index", 2);
std::shared_ptr<IndexSelectBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<IndexSelectBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
grad_fn->index_ = SavedVariable(index, false);
}
- auto ret = as_variable(baseType->index_select(self_, dim, index_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->index_select(self_, dim, index_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, index )) {
- jit::Node *n = jit::tracer::recordTrace( "index_select", { self, index }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "index_select", { self, index }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) const {
profiler::RecordFunction profiler("index_copy_");
@@ -727,8 +798,7 @@
auto& source_ = unpack(source, "source", 3);
check_inplace(self);
std::shared_ptr<IndexCopyBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, source });
- if (requires_grad) {
+ if (compute_requires_grad({ self, source })) {
grad_fn = std::make_shared<IndexCopyBackward>();
grad_fn->next_functions = compute_next_functions({ self, source });
grad_fn->dim = dim;
@@ -736,7 +806,7 @@
}
baseType->index_copy_(self_, dim, index_, source_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, index, source )) {
jit::Node *n = jit::tracer::recordTrace( "index_copy", { self, index, source }, { self } );
setattr(n, jit::stringToSymbol("dim"), dim);
@@ -748,20 +818,19 @@
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack_long(index, "index", 1);
std::shared_ptr<TakeBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TakeBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_info = self;
grad_fn->index_ = SavedVariable(index, false);
}
- auto ret = as_variable(baseType->take(self_, index_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->take(self_, index_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, index )) {
- jit::Node *n = jit::tracer::recordTrace( "take", { self, index }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "take", { self, index }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate) const {
profiler::RecordFunction profiler("put_");
@@ -770,8 +839,7 @@
auto& source_ = unpack(source, "source", 2);
check_inplace(self);
std::shared_ptr<PutBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, source });
- if (requires_grad) {
+ if (compute_requires_grad({ self, source })) {
grad_fn = std::make_shared<PutBackward>();
grad_fn->next_functions = compute_next_functions({ self, source });
grad_fn->index_ = SavedVariable(index, false);
@@ -780,7 +848,7 @@
}
baseType->put_(self_, index_, source_, accumulate);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, index, source )) {
jit::Node *n = jit::tracer::recordTrace( "put", { self, index, source }, { self } );
setattr(n, jit::stringToSymbol("accumulate"), accumulate);
@@ -794,8 +862,7 @@
auto& source_ = unpack(source, "source", 3);
check_inplace(self);
std::shared_ptr<IndexAddBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, source });
- if (requires_grad) {
+ if (compute_requires_grad({ self, source })) {
grad_fn = std::make_shared<IndexAddBackward>();
grad_fn->next_functions = compute_next_functions({ self, source });
grad_fn->dim = dim;
@@ -803,7 +870,7 @@
}
baseType->index_add_(self_, dim, index_, source_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, index, source )) {
jit::Node *n = jit::tracer::recordTrace( "index_add", { self, index, source }, { self } );
setattr(n, jit::stringToSymbol("dim"), dim);
@@ -816,8 +883,7 @@
auto& index_ = unpack_long(index, "index", 2);
check_inplace(self);
std::shared_ptr<IndexFillBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<IndexFillBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->dim = dim;
@@ -825,7 +891,7 @@
}
baseType->index_fill_(self_, dim, index_, value);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, index )) {
jit::Node *n = jit::tracer::recordTrace( "index_fill", { self, index }, { self } );
setattr(n, jit::stringToSymbol("dim"), dim);
@@ -834,14 +900,31 @@
return self;
}
Tensor & VariableType::index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value) const {
- throw std::runtime_error("VariableType::index_fill_ NYI");
+ profiler::RecordFunction profiler("index_fill_");
+ auto& self_ = unpack(self, "self", 0);
+ auto& index_ = unpack_long(index, "index", 2);
+ auto& value_ = unpack(value, "value", 3);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self, index, value })) {
+ grad_fn = std::make_shared<Error>("the derivative for index_fill_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self, index, value });
+
+ }
+ baseType->index_fill_(self_, dim, index_, value_);
+ increment_version(self);
+ rebase_history(self, grad_fn);
+ if (jit::tracer::isTracing( self, index, value )) {
+ jit::Node *n = jit::tracer::recordTrace( "index_fill", { self, index, value }, { self } );
+ setattr(n, jit::stringToSymbol("dim"), dim);
+ }
+ return self;
}
Tensor VariableType::unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step) const {
profiler::RecordFunction profiler("unfold");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UnfoldBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UnfoldBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
@@ -849,24 +932,30 @@
grad_fn->size = size;
grad_fn->step = step;
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->unfold(self_, dimension, size, step));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->unfold(self_, dimension, size, step));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "unfold", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "unfold", { self }, { result } );
setattr(n, jit::stringToSymbol("dimension"), dimension);
setattr(n, jit::stringToSymbol("size"), size);
setattr(n, jit::stringToSymbol("step"), step);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::range(Scalar start, Scalar end, Scalar step) const {
- return as_variable(baseType->range(start, end, step));
+ profiler::RecordFunction profiler("range");
+ auto result = as_variable(baseType->range(start, end, step));
+ return result;
}
Tensor VariableType::arange(Scalar start, Scalar end, Scalar step) const {
- return as_variable(baseType->arange(start, end, step));
+ profiler::RecordFunction profiler("arange");
+ auto result = as_variable(baseType->arange(start, end, step));
+ return result;
}
Tensor VariableType::arange(Scalar end) const {
- return as_variable(baseType->arange(end));
+ profiler::RecordFunction profiler("arange");
+ auto result = as_variable(baseType->arange(end));
+ return result;
}
Tensor & VariableType::scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) const {
profiler::RecordFunction profiler("scatter_");
@@ -875,8 +964,7 @@
auto& src_ = unpack(src, "src", 3);
check_inplace(self);
std::shared_ptr<ScatterBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self, src });
- if (requires_grad) {
+ if (compute_requires_grad({ self, src })) {
grad_fn = std::make_shared<ScatterBackward0>();
grad_fn->next_functions = compute_next_functions({ self, src });
grad_fn->dim = dim;
@@ -884,7 +972,7 @@
}
baseType->scatter_(self_, dim, index_, src_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, index, src )) {
jit::Node *n = jit::tracer::recordTrace( "scatter", { self, index, src }, { self } );
setattr(n, jit::stringToSymbol("dim"), dim);
@@ -897,8 +985,7 @@
auto& index_ = unpack_long(index, "index", 2);
check_inplace(self);
std::shared_ptr<ScatterBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ScatterBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->dim = dim;
@@ -906,7 +993,7 @@
}
baseType->scatter_(self_, dim, index_, value);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, index )) {
jit::Node *n = jit::tracer::recordTrace( "scatter", { self, index }, { self } );
setattr(n, jit::stringToSymbol("dim"), dim);
@@ -921,8 +1008,7 @@
auto& src_ = unpack(src, "src", 3);
check_inplace(self);
std::shared_ptr<ScatterAddBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, src });
- if (requires_grad) {
+ if (compute_requires_grad({ self, src })) {
grad_fn = std::make_shared<ScatterAddBackward>();
grad_fn->next_functions = compute_next_functions({ self, src });
grad_fn->dim = dim;
@@ -930,7 +1016,7 @@
}
baseType->scatter_add_(self_, dim, index_, src_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, index, src )) {
jit::Node *n = jit::tracer::recordTrace( "scatter_add", { self, index, src }, { self } );
setattr(n, jit::stringToSymbol("dim"), dim);
@@ -942,193 +1028,277 @@
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack_long(index, "index", 2);
std::shared_ptr<GatherBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<GatherBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
grad_fn->index_ = SavedVariable(index, false);
}
- auto ret = as_variable(baseType->gather(self_, dim, index_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->gather(self_, dim, index_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, index )) {
- jit::Node *n = jit::tracer::recordTrace( "gather", { self, index }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "gather", { self, index }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return result;
}
void* VariableType::data_ptr(const Tensor & self) const {
auto& self_ = unpack(self, "self", 0);
- return baseType->data_ptr(self_);
+ auto result = baseType->data_ptr(self_);
+ return result;
}
bool VariableType::equal(const Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("equal");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- return baseType->equal(self_, other_);
+ auto result = baseType->equal(self_, other_);
+ return result;
}
Tensor VariableType::__and__(const Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__and__");
auto& self_ = unpack(self, "self", 0);
- return as_variable(baseType->__and__(self_, other));
+ auto result = as_variable(baseType->__and__(self_, other));
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__and_", { self }, { result } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
+ return result;
}
Tensor VariableType::s___and__(const Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__and__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- return as_variable(baseType->s___and__(self_, other_));
+ auto result = as_variable(baseType->s___and__(self_, other_));
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__and_", { self, other }, { result } );
+ (void)n;
+ }
+ return result;
}
Tensor & VariableType::__iand__(Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__iand__");
auto& self_ = unpack(self, "self", 0);
baseType->__iand__(self_, other);
- increment_version(self);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__iand_", { self }, { self } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
return self;
}
Tensor & VariableType::s___iand__(Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__iand__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
baseType->s___iand__(self_, other_);
- increment_version(self);
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__iand_", { self, other }, { self } );
+ (void)n;
+ }
return self;
}
Tensor VariableType::__or__(const Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__or__");
auto& self_ = unpack(self, "self", 0);
- return as_variable(baseType->__or__(self_, other));
+ auto result = as_variable(baseType->__or__(self_, other));
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__or_", { self }, { result } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
+ return result;
}
Tensor VariableType::s___or__(const Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__or__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- return as_variable(baseType->s___or__(self_, other_));
+ auto result = as_variable(baseType->s___or__(self_, other_));
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__or_", { self, other }, { result } );
+ (void)n;
+ }
+ return result;
}
Tensor & VariableType::__ior__(Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__ior__");
auto& self_ = unpack(self, "self", 0);
baseType->__ior__(self_, other);
- increment_version(self);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__ior_", { self }, { self } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
return self;
}
Tensor & VariableType::s___ior__(Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__ior__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
baseType->s___ior__(self_, other_);
- increment_version(self);
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__ior_", { self, other }, { self } );
+ (void)n;
+ }
return self;
}
Tensor VariableType::__xor__(const Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__xor__");
auto& self_ = unpack(self, "self", 0);
- return as_variable(baseType->__xor__(self_, other));
+ auto result = as_variable(baseType->__xor__(self_, other));
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__xor_", { self }, { result } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
+ return result;
}
Tensor VariableType::s___xor__(const Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__xor__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- return as_variable(baseType->s___xor__(self_, other_));
+ auto result = as_variable(baseType->s___xor__(self_, other_));
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__xor_", { self, other }, { result } );
+ (void)n;
+ }
+ return result;
}
Tensor & VariableType::__ixor__(Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__ixor__");
auto& self_ = unpack(self, "self", 0);
baseType->__ixor__(self_, other);
- increment_version(self);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__ixor_", { self }, { self } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
return self;
}
Tensor & VariableType::s___ixor__(Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__ixor__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
baseType->s___ixor__(self_, other_);
- increment_version(self);
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__ixor_", { self, other }, { self } );
+ (void)n;
+ }
return self;
}
Tensor VariableType::__lshift__(const Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__lshift__");
auto& self_ = unpack(self, "self", 0);
- return as_variable(baseType->__lshift__(self_, other));
+ auto result = as_variable(baseType->__lshift__(self_, other));
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__lshift_", { self }, { result } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
+ return result;
}
Tensor VariableType::s___lshift__(const Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__lshift__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- return as_variable(baseType->s___lshift__(self_, other_));
+ auto result = as_variable(baseType->s___lshift__(self_, other_));
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__lshift_", { self, other }, { result } );
+ (void)n;
+ }
+ return result;
}
Tensor & VariableType::__ilshift__(Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__ilshift__");
auto& self_ = unpack(self, "self", 0);
baseType->__ilshift__(self_, other);
- increment_version(self);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__ilshift_", { self }, { self } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
return self;
}
Tensor & VariableType::s___ilshift__(Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__ilshift__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
baseType->s___ilshift__(self_, other_);
- increment_version(self);
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__ilshift_", { self, other }, { self } );
+ (void)n;
+ }
return self;
}
Tensor VariableType::__rshift__(const Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__rshift__");
auto& self_ = unpack(self, "self", 0);
- return as_variable(baseType->__rshift__(self_, other));
+ auto result = as_variable(baseType->__rshift__(self_, other));
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__rshift_", { self }, { result } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
+ return result;
}
Tensor VariableType::s___rshift__(const Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__rshift__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- return as_variable(baseType->s___rshift__(self_, other_));
+ auto result = as_variable(baseType->s___rshift__(self_, other_));
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__rshift_", { self, other }, { result } );
+ (void)n;
+ }
+ return result;
}
Tensor & VariableType::__irshift__(Tensor & self, Scalar other) const {
+ profiler::RecordFunction profiler("__irshift__");
auto& self_ = unpack(self, "self", 0);
baseType->__irshift__(self_, other);
- increment_version(self);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "__irshift_", { self }, { self } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ }
return self;
}
Tensor & VariableType::s___irshift__(Tensor & self, const Tensor & other) const {
+ profiler::RecordFunction profiler("__irshift__");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
baseType->s___irshift__(self_, other_);
- increment_version(self);
+ if (jit::tracer::isTracing( self, other )) {
+ jit::Node *n = jit::tracer::recordTrace( "__irshift_", { self, other }, { self } );
+ (void)n;
+ }
return self;
}
Tensor VariableType::lt(const Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("lt");
auto& self_ = unpack(self, "self", 0);
- std::shared_ptr<LtBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<LtBackward0>();
- grad_fn->next_functions = compute_next_functions({ self });
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->lt(self_, other));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->lt(self_, other));
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "lt", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "lt", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_lt(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("lt");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- std::shared_ptr<LtBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
- grad_fn = std::make_shared<LtBackward1>();
- grad_fn->next_functions = compute_next_functions({ self, other });
- grad_fn->other_info = other;
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->s_lt(self_, other_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->s_lt(self_, other_));
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "lt", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "lt", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::lt_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("lt_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<LtBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LtBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_info = self;
}
baseType->lt_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "lt", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -1141,8 +1311,7 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<LtBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<LtBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->other_info = other;
@@ -1150,7 +1319,7 @@
}
baseType->s_lt_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "lt", { self, other }, { self } );
(void)n;
@@ -1160,55 +1329,37 @@
Tensor VariableType::gt(const Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("gt");
auto& self_ = unpack(self, "self", 0);
- std::shared_ptr<GtBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<GtBackward0>();
- grad_fn->next_functions = compute_next_functions({ self });
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->gt(self_, other));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->gt(self_, other));
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "gt", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "gt", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_gt(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("gt");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- std::shared_ptr<GtBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
- grad_fn = std::make_shared<GtBackward1>();
- grad_fn->next_functions = compute_next_functions({ self, other });
- grad_fn->other_info = other;
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->s_gt(self_, other_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->s_gt(self_, other_));
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "gt", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "gt", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::gt_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("gt_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<GtBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<GtBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_info = self;
}
baseType->gt_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "gt", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -1221,8 +1372,7 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<GtBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<GtBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->other_info = other;
@@ -1230,7 +1380,7 @@
}
baseType->s_gt_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "gt", { self, other }, { self } );
(void)n;
@@ -1240,55 +1390,37 @@
Tensor VariableType::le(const Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("le");
auto& self_ = unpack(self, "self", 0);
- std::shared_ptr<LeBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<LeBackward0>();
- grad_fn->next_functions = compute_next_functions({ self });
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->le(self_, other));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->le(self_, other));
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "le", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "le", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_le(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("le");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- std::shared_ptr<LeBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
- grad_fn = std::make_shared<LeBackward1>();
- grad_fn->next_functions = compute_next_functions({ self, other });
- grad_fn->other_info = other;
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->s_le(self_, other_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->s_le(self_, other_));
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "le", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "le", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::le_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("le_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<LeBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LeBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_info = self;
}
baseType->le_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "le", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -1301,8 +1433,7 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<LeBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<LeBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->other_info = other;
@@ -1310,7 +1441,7 @@
}
baseType->s_le_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "le", { self, other }, { self } );
(void)n;
@@ -1320,55 +1451,37 @@
Tensor VariableType::ge(const Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("ge");
auto& self_ = unpack(self, "self", 0);
- std::shared_ptr<GeBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<GeBackward0>();
- grad_fn->next_functions = compute_next_functions({ self });
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->ge(self_, other));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->ge(self_, other));
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "ge", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "ge", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_ge(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("ge");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- std::shared_ptr<GeBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
- grad_fn = std::make_shared<GeBackward1>();
- grad_fn->next_functions = compute_next_functions({ self, other });
- grad_fn->other_info = other;
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->s_ge(self_, other_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->s_ge(self_, other_));
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "ge", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "ge", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::ge_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("ge_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<GeBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<GeBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_info = self;
}
baseType->ge_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "ge", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -1381,8 +1494,7 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<GeBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<GeBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->other_info = other;
@@ -1390,7 +1502,7 @@
}
baseType->s_ge_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "ge", { self, other }, { self } );
(void)n;
@@ -1400,55 +1512,37 @@
Tensor VariableType::eq(const Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("eq");
auto& self_ = unpack(self, "self", 0);
- std::shared_ptr<EqBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<EqBackward0>();
- grad_fn->next_functions = compute_next_functions({ self });
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->eq(self_, other));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->eq(self_, other));
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "eq", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "eq", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_eq(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("eq");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- std::shared_ptr<EqBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
- grad_fn = std::make_shared<EqBackward1>();
- grad_fn->next_functions = compute_next_functions({ self, other });
- grad_fn->other_info = other;
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->s_eq(self_, other_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->s_eq(self_, other_));
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "eq", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "eq", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::eq_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("eq_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<EqBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<EqBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_info = self;
}
baseType->eq_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "eq", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -1461,8 +1555,7 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<EqBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<EqBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->other_info = other;
@@ -1470,7 +1563,7 @@
}
baseType->s_eq_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "eq", { self, other }, { self } );
(void)n;
@@ -1480,55 +1573,37 @@
Tensor VariableType::ne(const Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("ne");
auto& self_ = unpack(self, "self", 0);
- std::shared_ptr<NeBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<NeBackward0>();
- grad_fn->next_functions = compute_next_functions({ self });
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->ne(self_, other));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->ne(self_, other));
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "ne", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "ne", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_ne(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("ne");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
- std::shared_ptr<NeBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
- grad_fn = std::make_shared<NeBackward1>();
- grad_fn->next_functions = compute_next_functions({ self, other });
- grad_fn->other_info = other;
- grad_fn->self_info = self;
- }
- auto ret = as_variable(baseType->s_ne(self_, other_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->s_ne(self_, other_));
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "ne", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "ne", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::ne_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("ne_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
- std::shared_ptr<NeBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<NeBackward0>();
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for ne_ is not implemented");
grad_fn->next_functions = compute_next_functions({ self });
- grad_fn->self_info = self;
+
}
baseType->ne_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "ne", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -1540,17 +1615,16 @@
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
- std::shared_ptr<NeBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
- grad_fn = std::make_shared<NeBackward1>();
+ std::shared_ptr<NeBackward> grad_fn;
+ if (compute_requires_grad({ self, other })) {
+ grad_fn = std::make_shared<NeBackward>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->other_info = other;
grad_fn->self_info = self;
}
baseType->s_ne_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "ne", { self, other }, { self } );
(void)n;
@@ -1561,328 +1635,316 @@
profiler::RecordFunction profiler("min");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MinBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MinBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->min(self_, dim, keepdim));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor min, min_indices;
+ std::tie(min, min_indices) = as_variable(baseType->min(self_, dim, keepdim));
+ set_history(min, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "min", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "min", { self }, { min, min_indices } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
if (grad_fn) {
- auto& min_indices = std::get<1>(ret);
grad_fn->min_indices_ = SavedVariable(min_indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(min), std::move(min_indices));
}
Tensor VariableType::s_min(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("min");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<MinBackward2> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<MinBackward2>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->other_ = SavedVariable(other, false);
}
- auto ret = as_variable(baseType->s_min(self_, other_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_min(self_, other_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "min", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "min", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::min(const Tensor & self) const {
profiler::RecordFunction profiler("min");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MinBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MinBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->min(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->min(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "min", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "min", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor> VariableType::max(const Tensor & self, int64_t dim, bool keepdim) const {
profiler::RecordFunction profiler("max");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MaxBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MaxBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->max(self_, dim, keepdim));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor max, max_indices;
+ std::tie(max, max_indices) = as_variable(baseType->max(self_, dim, keepdim));
+ set_history(max, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "max", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "max", { self }, { max, max_indices } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
if (grad_fn) {
- auto& max_indices = std::get<1>(ret);
grad_fn->max_indices_ = SavedVariable(max_indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(max), std::move(max_indices));
}
Tensor VariableType::s_max(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("max");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<MaxBackward2> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<MaxBackward2>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->other_ = SavedVariable(other, false);
}
- auto ret = as_variable(baseType->s_max(self_, other_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_max(self_, other_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "max", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::max(const Tensor & self) const {
profiler::RecordFunction profiler("max");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MaxBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MaxBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->max(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->max(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "max", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor> VariableType::kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim) const {
profiler::RecordFunction profiler("kthvalue");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<KthvalueBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<KthvalueBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->kthvalue(self_, k, dim, keepdim));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor values, indices;
+ std::tie(values, indices) = as_variable(baseType->kthvalue(self_, k, dim, keepdim));
+ set_history(values, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "kthvalue", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "kthvalue", { self }, { values, indices } );
setattr(n, jit::stringToSymbol("k"), k);
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(values), std::move(indices));
}
std::tuple<Tensor,Tensor> VariableType::mode(const Tensor & self, int64_t dim, bool keepdim) const {
profiler::RecordFunction profiler("mode");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ModeBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ModeBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->mode(self_, dim, keepdim));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor values, indices;
+ std::tie(values, indices) = as_variable(baseType->mode(self_, dim, keepdim));
+ set_history(values, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "mode", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "mode", { self }, { values, indices } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(values), std::move(indices));
}
std::tuple<Tensor,Tensor> VariableType::median(const Tensor & self, int64_t dim, bool keepdim) const {
profiler::RecordFunction profiler("median");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MedianBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MedianBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->median(self_, dim, keepdim));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor values, indices;
+ std::tie(values, indices) = as_variable(baseType->median(self_, dim, keepdim));
+ set_history(values, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "median", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "median", { self }, { values, indices } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(values), std::move(indices));
}
Tensor VariableType::median(const Tensor & self) const {
profiler::RecordFunction profiler("median");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MedianBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MedianBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->median(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->median(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "median", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "median", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor> VariableType::sort(const Tensor & self, int64_t dim, bool descending) const {
profiler::RecordFunction profiler("sort");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SortBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SortBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
}
- auto ret = as_variable(baseType->sort(self_, dim, descending));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor values, indices;
+ std::tie(values, indices) = as_variable(baseType->sort(self_, dim, descending));
+ set_history(values, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "sort", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "sort", { self }, { values, indices } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("descending"), descending);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(values), std::move(indices));
}
std::tuple<Tensor,Tensor> VariableType::topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const {
profiler::RecordFunction profiler("topk");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TopkBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TopkBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
}
- auto ret = as_variable(baseType->topk(self_, k, dim, largest, sorted));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor values, indices;
+ std::tie(values, indices) = as_variable(baseType->topk(self_, k, dim, largest, sorted));
+ set_history(values, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "topk", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "topk", { self }, { values, indices } );
setattr(n, jit::stringToSymbol("k"), k);
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("largest"), largest);
setattr(n, jit::stringToSymbol("sorted"), sorted);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(values), std::move(indices));
}
bool VariableType::all(const Tensor & self) const {
+ profiler::RecordFunction profiler("all");
auto& self_ = unpack(self, "self", 0);
- return baseType->all(self_);
+ auto result = baseType->all(self_);
+ return result;
}
bool VariableType::any(const Tensor & self) const {
+ profiler::RecordFunction profiler("any");
auto& self_ = unpack(self, "self", 0);
- return baseType->any(self_);
+ auto result = baseType->any(self_);
+ return result;
}
int64_t VariableType::get_device(const Tensor & self) const {
auto& self_ = unpack(self, "self", 0);
- return baseType->get_device(self_);
+ auto result = baseType->get_device(self_);
+ return result;
}
Tensor VariableType::abs(const Tensor & self) const {
profiler::RecordFunction profiler("abs");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AbsBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AbsBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->abs(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->abs(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "abs", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "abs", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::abs_(Tensor & self) const {
profiler::RecordFunction profiler("abs_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<AbsBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AbsBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->abs_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "abs", { self }, { self } );
(void)n;
@@ -1894,10 +1956,10 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<SigmoidBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SigmoidBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->sigmoid_(self_);
increment_version(self);
@@ -1901,7 +1963,7 @@
}
baseType->sigmoid_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "sigmoid", { self }, { self } );
(void)n;
@@ -1915,37 +1977,35 @@
profiler::RecordFunction profiler("sigmoid");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SigmoidBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SigmoidBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->sigmoid(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->sigmoid(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "sigmoid", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "sigmoid", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::log_(Tensor & self) const {
profiler::RecordFunction profiler("log_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<LogBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LogBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->log_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "log", { self }, { self } );
(void)n;
@@ -1956,34 +2016,32 @@
profiler::RecordFunction profiler("log");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<LogBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LogBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->log(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->log(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "log", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "log", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::log1p_(Tensor & self) const {
profiler::RecordFunction profiler("log1p_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<Log1PBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<Log1PBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->log1p_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "log1p", { self }, { self } );
(void)n;
@@ -1994,52 +2052,49 @@
profiler::RecordFunction profiler("log1p");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<Log1PBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<Log1PBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->log1p(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->log1p(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "log1p", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "log1p", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::lgamma(const Tensor & self) const {
profiler::RecordFunction profiler("lgamma");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<LgammaBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LgammaBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->lgamma(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->lgamma(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "lgamma", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "lgamma", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::lgamma_(Tensor & self) const {
profiler::RecordFunction profiler("lgamma_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<LgammaBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LgammaBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->lgamma_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "lgamma", { self }, { self } );
(void)n;
@@ -2050,34 +2105,32 @@
profiler::RecordFunction profiler("digamma");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<DigammaBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<DigammaBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->digamma(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->digamma(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "digamma", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "digamma", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::digamma_(Tensor & self) const {
profiler::RecordFunction profiler("digamma_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<DigammaBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<DigammaBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->digamma_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "digamma", { self }, { self } );
(void)n;
@@ -2088,33 +2141,48 @@
profiler::RecordFunction profiler("polygamma");
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<PolygammaBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<PolygammaBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->n = n;
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->polygamma(n, self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->polygamma(n, self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "polygamma", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "polygamma", { self }, { result } );
setattr(n, jit::stringToSymbol("n"), n);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::polygamma_(Tensor & self, int64_t n) const {
- throw std::runtime_error("VariableType::polygamma_ NYI");
+ profiler::RecordFunction profiler("polygamma_");
+ auto& self_ = unpack(self, "self", 0);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for polygamma_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
+ baseType->polygamma_(self_, n);
+ increment_version(self);
+ rebase_history(self, grad_fn);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "polygamma", { self }, { self } );
+ setattr(n, jit::stringToSymbol("n"), n);
+ }
+ return self;
}
Tensor & VariableType::exp_(Tensor & self) const {
profiler::RecordFunction profiler("exp_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ExpBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ExpBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->exp_(self_);
increment_version(self);
@@ -2118,7 +2186,7 @@
}
baseType->exp_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "exp", { self }, { self } );
(void)n;
@@ -2132,32 +2200,31 @@
profiler::RecordFunction profiler("exp");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ExpBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ExpBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->exp(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->exp(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "exp", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "exp", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::expm1_(Tensor & self) const {
profiler::RecordFunction profiler("expm1_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<Expm1Backward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<Expm1Backward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->expm1_(self_);
increment_version(self);
@@ -2161,7 +2228,7 @@
}
baseType->expm1_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "expm1", { self }, { self } );
(void)n;
@@ -2175,37 +2242,35 @@
profiler::RecordFunction profiler("expm1");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<Expm1Backward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<Expm1Backward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->expm1(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->expm1(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "expm1", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "expm1", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::cos_(Tensor & self) const {
profiler::RecordFunction profiler("cos_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<CosBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CosBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->cos_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "cos", { self }, { self } );
(void)n;
@@ -2216,34 +2281,32 @@
profiler::RecordFunction profiler("cos");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CosBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CosBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->cos(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->cos(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "cos", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cos", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::acos_(Tensor & self) const {
profiler::RecordFunction profiler("acos_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<AcosBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AcosBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->acos_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "acos", { self }, { self } );
(void)n;
@@ -2254,34 +2317,32 @@
profiler::RecordFunction profiler("acos");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AcosBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AcosBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->acos(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->acos(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "acos", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "acos", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::cosh_(Tensor & self) const {
profiler::RecordFunction profiler("cosh_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<CoshBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CoshBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->cosh_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "cosh", { self }, { self } );
(void)n;
@@ -2292,34 +2353,32 @@
profiler::RecordFunction profiler("cosh");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CoshBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CoshBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->cosh(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->cosh(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "cosh", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cosh", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::sin_(Tensor & self) const {
profiler::RecordFunction profiler("sin_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<SinBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SinBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->sin_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "sin", { self }, { self } );
(void)n;
@@ -2330,34 +2389,32 @@
profiler::RecordFunction profiler("sin");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SinBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SinBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->sin(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->sin(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "sin", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "sin", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::asin_(Tensor & self) const {
profiler::RecordFunction profiler("asin_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<AsinBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AsinBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->asin_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "asin", { self }, { self } );
(void)n;
@@ -2368,34 +2425,32 @@
profiler::RecordFunction profiler("asin");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AsinBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AsinBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->asin(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->asin(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "asin", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "asin", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::sinh_(Tensor & self) const {
profiler::RecordFunction profiler("sinh_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<SinhBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SinhBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->sinh_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "sinh", { self }, { self } );
(void)n;
@@ -2406,29 +2461,28 @@
profiler::RecordFunction profiler("sinh");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SinhBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SinhBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->sinh(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->sinh(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "sinh", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "sinh", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::tan_(Tensor & self) const {
profiler::RecordFunction profiler("tan_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<TanBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TanBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->tan_(self_);
increment_version(self);
@@ -2432,7 +2486,7 @@
}
baseType->tan_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "tan", { self }, { self } );
(void)n;
@@ -2446,37 +2500,35 @@
profiler::RecordFunction profiler("tan");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TanBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TanBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->tan(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->tan(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "tan", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "tan", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::atan_(Tensor & self) const {
profiler::RecordFunction profiler("atan_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<AtanBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AtanBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->atan_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "atan", { self }, { self } );
(void)n;
@@ -2487,29 +2539,28 @@
profiler::RecordFunction profiler("atan");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AtanBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AtanBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->atan(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->atan(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "atan", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "atan", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::tanh_(Tensor & self) const {
profiler::RecordFunction profiler("tanh_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<TanhBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TanhBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->tanh_(self_);
increment_version(self);
@@ -2513,7 +2564,7 @@
}
baseType->tanh_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "tanh", { self }, { self } );
(void)n;
@@ -2527,37 +2578,35 @@
profiler::RecordFunction profiler("tanh");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TanhBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TanhBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->tanh(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->tanh(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "tanh", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "tanh", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::erf_(Tensor & self) const {
profiler::RecordFunction profiler("erf_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ErfBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ErfBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->erf_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "erf", { self }, { self } );
(void)n;
@@ -2568,34 +2617,32 @@
profiler::RecordFunction profiler("erf");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ErfBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ErfBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->erf(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->erf(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "erf", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "erf", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::erfinv_(Tensor & self) const {
profiler::RecordFunction profiler("erfinv_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ErfinvBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ErfinvBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
}
baseType->erfinv_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "erfinv", { self }, { self } );
(void)n;
@@ -2606,29 +2653,28 @@
profiler::RecordFunction profiler("erfinv");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ErfinvBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ErfinvBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->erfinv(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->erfinv(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "erfinv", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "erfinv", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::sqrt_(Tensor & self) const {
profiler::RecordFunction profiler("sqrt_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<SqrtBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SqrtBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->sqrt_(self_);
increment_version(self);
@@ -2632,7 +2678,7 @@
}
baseType->sqrt_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "sqrt", { self }, { self } );
(void)n;
@@ -2646,32 +2692,31 @@
profiler::RecordFunction profiler("sqrt");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SqrtBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SqrtBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->sqrt(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->sqrt(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "sqrt", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "sqrt", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::rsqrt_(Tensor & self) const {
profiler::RecordFunction profiler("rsqrt_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<RsqrtBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RsqrtBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->rsqrt_(self_);
increment_version(self);
@@ -2675,7 +2720,7 @@
}
baseType->rsqrt_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "rsqrt", { self }, { self } );
(void)n;
@@ -2689,32 +2734,31 @@
profiler::RecordFunction profiler("rsqrt");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<RsqrtBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RsqrtBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->rsqrt(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->rsqrt(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "rsqrt", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "rsqrt", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::ceil_(Tensor & self) const {
profiler::RecordFunction profiler("ceil_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<CeilBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CeilBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->ceil_(self_);
increment_version(self);
@@ -2718,7 +2762,7 @@
}
baseType->ceil_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "ceil", { self }, { self } );
(void)n;
@@ -2729,28 +2773,28 @@
profiler::RecordFunction profiler("ceil");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CeilBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CeilBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->ceil(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->ceil(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "ceil", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "ceil", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::floor_(Tensor & self) const {
profiler::RecordFunction profiler("floor_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<FloorBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<FloorBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->floor_(self_);
increment_version(self);
@@ -2754,7 +2798,7 @@
}
baseType->floor_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "floor", { self }, { self } );
(void)n;
@@ -2765,28 +2809,28 @@
profiler::RecordFunction profiler("floor");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<FloorBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<FloorBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->floor(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->floor(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "floor", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "floor", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::round_(Tensor & self) const {
profiler::RecordFunction profiler("round_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<RoundBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RoundBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->round_(self_);
increment_version(self);
@@ -2790,7 +2834,7 @@
}
baseType->round_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "round", { self }, { self } );
(void)n;
@@ -2801,28 +2845,28 @@
profiler::RecordFunction profiler("round");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<RoundBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RoundBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->round(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->round(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "round", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "round", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::trunc_(Tensor & self) const {
profiler::RecordFunction profiler("trunc_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<TruncBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TruncBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->trunc_(self_);
increment_version(self);
@@ -2826,7 +2870,7 @@
}
baseType->trunc_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "trunc", { self }, { self } );
(void)n;
@@ -2837,28 +2881,28 @@
profiler::RecordFunction profiler("trunc");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TruncBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TruncBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->trunc(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->trunc(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "trunc", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "trunc", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::frac_(Tensor & self) const {
profiler::RecordFunction profiler("frac_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<FracBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<FracBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->frac_(self_);
increment_version(self);
@@ -2862,7 +2906,7 @@
}
baseType->frac_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "frac", { self }, { self } );
(void)n;
@@ -2873,25 +2917,24 @@
profiler::RecordFunction profiler("frac");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<FracBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<FracBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->frac(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->frac(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "frac", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "frac", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::mean(const Tensor & self, int64_t dim, bool keepdim) const {
profiler::RecordFunction profiler("mean");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MeanBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MeanBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
@@ -2899,40 +2942,38 @@
grad_fn->dim = dim;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->mean(self_, dim, keepdim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->mean(self_, dim, keepdim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "mean", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "mean", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::mean(const Tensor & self) const {
profiler::RecordFunction profiler("mean");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MeanBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MeanBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->mean(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->mean(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "mean", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "mean", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::var(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const {
profiler::RecordFunction profiler("var");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<VarBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<VarBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -2940,41 +2981,39 @@
grad_fn->unbiased = unbiased;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->var(self_, dim, unbiased, keepdim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->var(self_, dim, unbiased, keepdim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "var", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "var", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("unbiased"), unbiased);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::var(const Tensor & self, bool unbiased) const {
profiler::RecordFunction profiler("var");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<VarBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<VarBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->unbiased = unbiased;
}
- auto ret = as_variable(baseType->var(self_, unbiased));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->var(self_, unbiased));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "var", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "var", { self }, { result } );
setattr(n, jit::stringToSymbol("unbiased"), unbiased);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::std(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const {
profiler::RecordFunction profiler("std");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<StdBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<StdBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -2982,49 +3021,45 @@
grad_fn->unbiased = unbiased;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->std(self_, dim, unbiased, keepdim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->std(self_, dim, unbiased, keepdim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "std", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "std", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("unbiased"), unbiased);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::std(const Tensor & self, bool unbiased) const {
profiler::RecordFunction profiler("std");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<StdBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<StdBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->unbiased = unbiased;
}
- auto ret = as_variable(baseType->std(self_, unbiased));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->std(self_, unbiased));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "std", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "std", { self }, { result } );
setattr(n, jit::stringToSymbol("unbiased"), unbiased);
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::norm(const Tensor & self, Scalar p, int64_t dim, bool keepdim) const {
profiler::RecordFunction profiler("norm");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<NormBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<NormBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -3032,49 +3067,45 @@
grad_fn->dim = dim;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->norm(self_, p, dim, keepdim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->norm(self_, p, dim, keepdim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "norm", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "norm", { self }, { result } );
setattr(n, jit::stringToSymbol("p"), p);
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::norm(const Tensor & self, Scalar p) const {
profiler::RecordFunction profiler("norm");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<NormBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<NormBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->p = p;
}
- auto ret = as_variable(baseType->norm(self_, p));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->norm(self_, p));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "norm", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "norm", { self }, { result } );
setattr(n, jit::stringToSymbol("p"), p);
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) const {
profiler::RecordFunction profiler("renorm");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<RenormBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RenormBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -3082,23 +3113,22 @@
grad_fn->dim = dim;
grad_fn->maxnorm = maxnorm;
}
- auto ret = as_variable(baseType->renorm(self_, p, dim, maxnorm));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->renorm(self_, p, dim, maxnorm));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "renorm", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "renorm", { self }, { result } );
setattr(n, jit::stringToSymbol("p"), p);
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("maxnorm"), maxnorm);
}
- return Tensor(std::move(ret));
+ return result;
}
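// Note: the in-place variants that follow (renorm_, atan2_, pow_, mul_, div_, ...)
// save self.clone() into the grad_fn rather than self itself -- presumably because
// self is about to be overwritten and the backward needs its original value --
// while the out-of-place variants save the inputs directly with
// SavedVariable(self, false).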
Tensor & VariableType::renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) const {
profiler::RecordFunction profiler("renorm_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<RenormBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RenormBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
@@ -3108,7 +3138,7 @@
}
baseType->renorm_(self_, p, dim, maxnorm);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "renorm", { self }, { self } );
setattr(n, jit::stringToSymbol("p"), p);
@@ -3122,56 +3152,53 @@
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<DistBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<DistBackward>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->other_ = SavedVariable(other, false);
grad_fn->p = p;
}
- auto ret = as_variable(baseType->s_dist(self_, other_, p));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_dist(self_, other_, p));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "dist", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "dist", { self, other }, { result } );
setattr(n, jit::stringToSymbol("p"), p);
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::reciprocal(const Tensor & self) const {
profiler::RecordFunction profiler("reciprocal");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ReciprocalBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ReciprocalBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->reciprocal(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->reciprocal(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "reciprocal", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "reciprocal", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::reciprocal_(Tensor & self) const {
profiler::RecordFunction profiler("reciprocal_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ReciprocalBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ReciprocalBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->reciprocal_(self_);
increment_version(self);
@@ -3175,7 +3202,7 @@
}
baseType->reciprocal_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "reciprocal", { self }, { self } );
(void)n;
@@ -3189,28 +3216,28 @@
profiler::RecordFunction profiler("neg");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<NegBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<NegBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->neg(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->neg(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "neg", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "neg", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::neg_(Tensor & self) const {
profiler::RecordFunction profiler("neg_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<NegBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<NegBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->neg_(self_);
increment_version(self);
@@ -3214,7 +3241,7 @@
}
baseType->neg_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "neg", { self }, { self } );
(void)n;
@@ -3226,20 +3253,19 @@
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<Atan2Backward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<Atan2Backward>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->other_ = SavedVariable(other, false);
}
- auto ret = as_variable(baseType->s_atan2(self_, other_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_atan2(self_, other_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "atan2", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "atan2", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::s_atan2_(Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("atan2_");
@@ -3247,8 +3273,7 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<Atan2Backward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<Atan2Backward>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self.clone(), false);
@@ -3256,7 +3281,7 @@
}
baseType->s_atan2_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "atan2", { self, other }, { self } );
(void)n;
@@ -3267,48 +3292,45 @@
profiler::RecordFunction profiler("pow");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<PowBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<PowBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->exponent = exponent;
}
- auto ret = as_variable(baseType->pow(self_, exponent));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->pow(self_, exponent));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "pow", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "pow", { self }, { result } );
setattr(n, jit::stringToSymbol("exponent"), exponent);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_pow(const Tensor & self, const Tensor & exponent) const {
profiler::RecordFunction profiler("pow");
auto& self_ = unpack(self, "self", 0);
auto& exponent_ = unpack(exponent, "exponent", 1);
std::shared_ptr<PowBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, exponent });
- if (requires_grad) {
+ if (compute_requires_grad({ self, exponent })) {
grad_fn = std::make_shared<PowBackward1>();
grad_fn->next_functions = compute_next_functions({ self, exponent });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->exponent_ = SavedVariable(exponent, false);
}
- auto ret = as_variable(baseType->s_pow(self_, exponent_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_pow(self_, exponent_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, exponent )) {
- jit::Node *n = jit::tracer::recordTrace( "pow", { self, exponent }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "pow", { self, exponent }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::pow_(Tensor & self, Scalar exponent) const {
profiler::RecordFunction profiler("pow_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<PowBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<PowBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
@@ -3316,7 +3338,7 @@
}
baseType->pow_(self_, exponent);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "pow", { self }, { self } );
setattr(n, jit::stringToSymbol("exponent"), exponent);
@@ -3329,8 +3351,7 @@
auto& exponent_ = unpack(exponent, "exponent", 1);
check_inplace(self);
std::shared_ptr<PowBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, exponent });
- if (requires_grad) {
+ if (compute_requires_grad({ self, exponent })) {
grad_fn = std::make_shared<PowBackward1>();
grad_fn->next_functions = compute_next_functions({ self, exponent });
grad_fn->self_ = SavedVariable(self.clone(), false);
@@ -3338,7 +3359,7 @@
}
baseType->s_pow_(self_, exponent_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, exponent )) {
jit::Node *n = jit::tracer::recordTrace( "pow", { self, exponent }, { self } );
(void)n;
@@ -3350,19 +3371,18 @@
auto& self_ = unpack(self, "self", 0);
auto& end_ = unpack(end, "end", 1);
std::shared_ptr<LerpBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, end });
- if (requires_grad) {
+ if (compute_requires_grad({ self, end })) {
grad_fn = std::make_shared<LerpBackward>();
grad_fn->next_functions = compute_next_functions({ self, end });
grad_fn->weight = weight;
}
- auto ret = as_variable(baseType->s_lerp(self_, end_, weight));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_lerp(self_, end_, weight));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, end )) {
- jit::Node *n = jit::tracer::recordTrace( "lerp", { self, end }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "lerp", { self, end }, { result } );
setattr(n, jit::stringToSymbol("weight"), weight);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::s_lerp_(Tensor & self, const Tensor & end, Scalar weight) const {
profiler::RecordFunction profiler("lerp_");
@@ -3370,15 +3390,14 @@
auto& end_ = unpack(end, "end", 1);
check_inplace(self);
std::shared_ptr<LerpBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, end });
- if (requires_grad) {
+ if (compute_requires_grad({ self, end })) {
grad_fn = std::make_shared<LerpBackward>();
grad_fn->next_functions = compute_next_functions({ self, end });
grad_fn->weight = weight;
}
baseType->s_lerp_(self_, end_, weight);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, end )) {
jit::Node *n = jit::tracer::recordTrace( "lerp", { self, end }, { self } );
setattr(n, jit::stringToSymbol("weight"), weight);
@@ -3386,39 +3405,43 @@
return self;
}
Tensor VariableType::linspace(Scalar start, Scalar end, int64_t steps) const {
- return as_variable(baseType->linspace(start, end, steps));
+ profiler::RecordFunction profiler("linspace");
+ auto result = as_variable(baseType->linspace(start, end, steps));
+ return result;
}
Tensor VariableType::logspace(Scalar start, Scalar end, int64_t steps) const {
- return as_variable(baseType->logspace(start, end, steps));
+ profiler::RecordFunction profiler("logspace");
+ auto result = as_variable(baseType->logspace(start, end, steps));
+ return result;
}
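// Note: linspace and logspace take only Scalar/int arguments, so no grad_fn is
// set up; the regenerated bodies simply add a profiler::RecordFunction entry and
// wrap the base result with as_variable before returning it.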
Tensor VariableType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const {
profiler::RecordFunction profiler("histc");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<HistcBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<HistcBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->histc(self_, bins, min, max));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->histc(self_, bins, min, max));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "histc", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "histc", { self }, { result } );
setattr(n, jit::stringToSymbol("bins"), bins);
setattr(n, jit::stringToSymbol("min"), min);
setattr(n, jit::stringToSymbol("max"), max);
}
- return Tensor(std::move(ret));
+ return result;
}
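// Note: histc previously passed an initializer list to set_history ("{ ret }");
// the regenerated code passes the tensor directly as set_history(result, grad_fn),
// matching the other unary ops in this file.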
Tensor & VariableType::zero_(Tensor & self) const {
profiler::RecordFunction profiler("zero_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ZeroBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ZeroBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->zero_(self_);
increment_version(self);
@@ -3422,7 +3445,7 @@
}
baseType->zero_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "zero", { self }, { self } );
(void)n;
@@ -3433,151 +3456,143 @@
profiler::RecordFunction profiler("sum");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SumBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SumBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->dim = dim;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->sum(self_, dim, keepdim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->sum(self_, dim, keepdim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "sum", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "sum", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::sum(const Tensor & self) const {
profiler::RecordFunction profiler("sum");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SumBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SumBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
}
- auto ret = as_variable(baseType->sum(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->sum(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "sum", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "sum", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::prod(const Tensor & self, int64_t dim, bool keepdim) const {
profiler::RecordFunction profiler("prod");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ProdBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ProdBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->dim = dim;
grad_fn->keepdim = keepdim;
}
- auto ret = as_variable(baseType->prod(self_, dim, keepdim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->prod(self_, dim, keepdim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "prod", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "prod", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("keepdim"), keepdim);
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::prod(const Tensor & self) const {
profiler::RecordFunction profiler("prod");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ProdBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ProdBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->prod(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->prod(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "prod", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "prod", { self }, { result } );
(void)n;
}
if (grad_fn) {
- auto& result = ret;
grad_fn->result_ = SavedVariable(result, true);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::cumsum(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("cumsum");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CumsumBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CumsumBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->dim = dim;
}
- auto ret = as_variable(baseType->cumsum(self_, dim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->cumsum(self_, dim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "cumsum", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cumsum", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::cumprod(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("cumprod");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CumprodBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CumprodBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->dim = dim;
}
- auto ret = as_variable(baseType->cumprod(self_, dim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->cumprod(self_, dim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "cumprod", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cumprod", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::sign(const Tensor & self) const {
profiler::RecordFunction profiler("sign");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SignBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SignBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->sign(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->sign(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "sign", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "sign", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::sign_(Tensor & self) const {
profiler::RecordFunction profiler("sign_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<SignBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SignBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->sign_(self_);
increment_version(self);
@@ -3581,7 +3596,7 @@
}
baseType->sign_(self_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "sign", { self }, { self } );
(void)n;
@@ -3592,69 +3607,83 @@
profiler::RecordFunction profiler("trace");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TraceBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TraceBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
}
- auto ret = as_variable(baseType->trace(self_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->trace(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "trace", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "trace", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::add(const Tensor & self, Scalar other, Scalar alpha) const {
profiler::RecordFunction profiler("add");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AddBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AddBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->add(self_, other, alpha));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->add(self_, other, alpha));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "add", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "add", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
setattr(n, jit::stringToSymbol("alpha"), alpha);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_add(const Tensor & self, const Tensor & other, Scalar alpha) const {
profiler::RecordFunction profiler("add");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<AddBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<AddBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->alpha = alpha;
}
- auto ret = as_variable(baseType->s_add(self_, other_, alpha));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_add(self_, other_, alpha));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "add", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "add", { self, other }, { result } );
setattr(n, jit::stringToSymbol("alpha"), alpha);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::add(const Tensor & self, SparseTensor other, Scalar alpha) const {
- throw std::runtime_error("VariableType::add NYI");
+ profiler::RecordFunction profiler("add");
+ auto& self_ = unpack(self, "self", 0);
+ auto other_ = unpack(other, "other", 1);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for add is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
+ auto result = as_variable(baseType->add(self_, other_, alpha));
+ set_history(result, grad_fn);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "add", { self }, { result } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ setattr(n, jit::stringToSymbol("alpha"), alpha);
+ }
+ return result;
}
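// Note: the SparseTensor overloads of add (above) and add_ (further below) used to
// throw "NYI" unconditionally. The regenerated bodies dispatch to the base
// implementation and, when grad is required, attach an Error node with the message
// "the derivative for add is not implemented", so the forward pass now works and
// the failure is evidently deferred until backward reaches that node.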
Tensor & VariableType::add_(Tensor & self, Scalar other, Scalar alpha) const {
profiler::RecordFunction profiler("add_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<AddBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AddBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->add_(self_, other, alpha);
increment_version(self);
@@ -3658,7 +3687,7 @@
}
baseType->add_(self_, other, alpha);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "add", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -3672,15 +3701,14 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<AddBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<AddBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->alpha = alpha;
}
baseType->s_add_(self_, other_, alpha);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "add", { self, other }, { self } );
setattr(n, jit::stringToSymbol("alpha"), alpha);
@@ -3688,54 +3716,71 @@
return self;
}
Tensor & VariableType::add_(Tensor & self, SparseTensor other, Scalar alpha) const {
- throw std::runtime_error("VariableType::add_ NYI");
+ profiler::RecordFunction profiler("add_");
+ auto& self_ = unpack(self, "self", 0);
+ auto other_ = unpack(other, "other", 1);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for add_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
+ baseType->add_(self_, other_, alpha);
+ increment_version(self);
+ rebase_history(self, grad_fn);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "add", { self }, { self } );
+ setattr(n, jit::stringToSymbol("other"), other);
+ setattr(n, jit::stringToSymbol("alpha"), alpha);
+ }
+ return self;
}
Tensor VariableType::sub(const Tensor & self, Scalar other, Scalar alpha) const {
profiler::RecordFunction profiler("sub");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SubBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SubBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->sub(self_, other, alpha));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->sub(self_, other, alpha));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "sub", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "sub", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
setattr(n, jit::stringToSymbol("alpha"), alpha);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_sub(const Tensor & self, const Tensor & other, Scalar alpha) const {
profiler::RecordFunction profiler("sub");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<SubBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<SubBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->alpha = alpha;
}
- auto ret = as_variable(baseType->s_sub(self_, other_, alpha));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_sub(self_, other_, alpha));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "sub", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "sub", { self, other }, { result } );
setattr(n, jit::stringToSymbol("alpha"), alpha);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::sub_(Tensor & self, Scalar other, Scalar alpha) const {
profiler::RecordFunction profiler("sub_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<SubBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SubBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->sub_(self_, other, alpha);
increment_version(self);
@@ -3739,7 +3784,7 @@
}
baseType->sub_(self_, other, alpha);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "sub", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -3753,15 +3798,14 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<SubBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<SubBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->alpha = alpha;
}
baseType->s_sub_(self_, other_, alpha);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "sub", { self, other }, { self } );
setattr(n, jit::stringToSymbol("alpha"), alpha);
@@ -3772,54 +3816,51 @@
profiler::RecordFunction profiler("mul");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MulBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MulBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->other = other;
}
- auto ret = as_variable(baseType->mul(self_, other));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->mul(self_, other));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "mul", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "mul", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_mul(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("mul");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<MulBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<MulBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->other_ = SavedVariable(other, false);
}
- auto ret = as_variable(baseType->s_mul(self_, other_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_mul(self_, other_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "mul", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "mul", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::mul_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("mul_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<MulBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MulBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->other = other;
}
baseType->mul_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "mul", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -3832,8 +3873,7 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<MulBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<MulBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self.clone(), false);
@@ -3841,7 +3881,7 @@
}
baseType->s_mul_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "mul", { self, other }, { self } );
(void)n;
@@ -3852,54 +3892,51 @@
profiler::RecordFunction profiler("div");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<DivBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<DivBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->other = other;
}
- auto ret = as_variable(baseType->div(self_, other));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->div(self_, other));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "div", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "div", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_div(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("div");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<DivBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<DivBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->other_ = SavedVariable(other, false);
}
- auto ret = as_variable(baseType->s_div(self_, other_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_div(self_, other_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "div", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "div", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::div_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("div_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<DivBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<DivBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->other = other;
}
baseType->div_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "div", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -3912,8 +3949,7 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<DivBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<DivBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self.clone(), false);
@@ -3921,7 +3957,7 @@
}
baseType->s_div_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "div", { self, other }, { self } );
(void)n;
@@ -3932,47 +3968,46 @@
profiler::RecordFunction profiler("fmod");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<FmodBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<FmodBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->fmod(self_, other));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->fmod(self_, other));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "fmod", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "fmod", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_fmod(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("fmod");
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<FmodBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<FmodBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->other_ = SavedVariable(other, false);
}
- auto ret = as_variable(baseType->s_fmod(self_, other_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_fmod(self_, other_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "fmod", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "fmod", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::fmod_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("fmod_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<FmodBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<FmodBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->fmod_(self_, other);
increment_version(self);
@@ -3976,7 +4011,7 @@
}
baseType->fmod_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "fmod", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -3989,15 +4024,14 @@
auto& other_ = unpack(other, "other", 1);
check_inplace(self);
std::shared_ptr<FmodBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<FmodBackward1>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->other_ = SavedVariable(other, false);
}
baseType->s_fmod_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "fmod", { self, other }, { self } );
(void)n;
@@ -4008,18 +4042,18 @@
profiler::RecordFunction profiler("remainder");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<RemainderBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RemainderBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->remainder(self_, other));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->remainder(self_, other));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "remainder", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "remainder", { self }, { result } );
setattr(n, jit::stringToSymbol("other"), other);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_remainder(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("remainder");
@@ -4027,28 +4061,28 @@
auto& other_ = unpack(other, "other", 1);
check_no_requires_grad(other, "other");
std::shared_ptr<RemainderBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RemainderBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->s_remainder(self_, other_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_remainder(self_, other_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "remainder", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "remainder", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::remainder_(Tensor & self, Scalar other) const {
profiler::RecordFunction profiler("remainder_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<RemainderBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RemainderBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->remainder_(self_, other);
increment_version(self);
@@ -4052,7 +4086,7 @@
}
baseType->remainder_(self_, other);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "remainder", { self }, { self } );
setattr(n, jit::stringToSymbol("other"), other);
@@ -4066,10 +4100,10 @@
check_inplace(self);
check_no_requires_grad(other, "other");
std::shared_ptr<RemainderBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RemainderBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->s_remainder_(self_, other_);
increment_version(self);
@@ -4073,7 +4107,7 @@
}
baseType->s_remainder_(self_, other_);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, other )) {
jit::Node *n = jit::tracer::recordTrace( "remainder", { self, other }, { self } );
(void)n;
@@ -4084,30 +4118,28 @@
profiler::RecordFunction profiler("clamp");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ClampBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ClampBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->min = min;
grad_fn->max = max;
}
- auto ret = as_variable(baseType->clamp(self_, min, max));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->clamp(self_, min, max));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "clamp", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "clamp", { self }, { result } );
setattr(n, jit::stringToSymbol("min"), min);
setattr(n, jit::stringToSymbol("max"), max);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::clamp_(Tensor & self, Scalar min, Scalar max) const {
profiler::RecordFunction profiler("clamp_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ClampBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ClampBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
@@ -4116,7 +4148,7 @@
}
baseType->clamp_(self_, min, max);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "clamp", { self }, { self } );
setattr(n, jit::stringToSymbol("min"), min);
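
The in-place variants (clamp_, fmod_, remainder_ and the rest) share a second pattern: run the base op, bump the version counter, then call rebase_history with the tensor itself, since the new call sites no longer spell out a static_cast<Variable&>. A simplified sketch of that sequence follows, assuming rebase_history now accepts the Tensor reference directly, as the rewritten call sites imply; every type and helper below is a stand-in.

// Hypothetical stand-ins for the in-place wrapper sequence shown above; the
// real version-counter and history-rewrite logic lives in the autograd core.
#include <cstdio>
#include <memory>
#include <utility>

struct GradFn {};

struct Tensor {
  int version = 0;
  bool has_history = false;
};

static void increment_version(Tensor& t) { ++t.version; }

static void rebase_history(Tensor& tensor, std::shared_ptr<GradFn> grad_fn) {
  if (!grad_fn) return;              // nothing to record without a grad_fn
  tensor.has_history = true;         // placeholder for the real history rewrite
}

static Tensor& clamp_like_(Tensor& self, std::shared_ptr<GradFn> grad_fn) {
  // baseType->clamp_(self_, min, max) would mutate the data here
  increment_version(self);                    // in-place ops bump the version counter
  rebase_history(self, std::move(grad_fn));   // Tensor passed directly, no cast at the call site
  return self;
}

int main() {
  Tensor t;
  clamp_like_(t, std::make_shared<GradFn>());
  std::printf("version=%d has_history=%d\n", t.version, t.has_history ? 1 : 0);
}
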
@@ -4128,28 +4160,26 @@
profiler::RecordFunction profiler("clamp_min");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ClampMinBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ClampMinBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->min = min;
}
- auto ret = as_variable(baseType->clamp_min(self_, min));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->clamp_min(self_, min));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "clamp_min", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "clamp_min", { self }, { result } );
setattr(n, jit::stringToSymbol("min"), min);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::clamp_min_(Tensor & self, Scalar min) const {
profiler::RecordFunction profiler("clamp_min_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ClampMinBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ClampMinBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
@@ -4157,7 +4187,7 @@
}
baseType->clamp_min_(self_, min);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "clamp_min", { self }, { self } );
setattr(n, jit::stringToSymbol("min"), min);
@@ -4168,28 +4198,26 @@
profiler::RecordFunction profiler("clamp_max");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ClampMaxBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ClampMaxBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->max = max;
}
- auto ret = as_variable(baseType->clamp_max(self_, max));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->clamp_max(self_, max));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "clamp_max", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "clamp_max", { self }, { result } );
setattr(n, jit::stringToSymbol("max"), max);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::clamp_max_(Tensor & self, Scalar max) const {
profiler::RecordFunction profiler("clamp_max_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ClampMaxBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ClampMaxBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self.clone(), false);
@@ -4197,7 +4225,7 @@
}
baseType->clamp_max_(self_, max);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "clamp_max", { self }, { self } );
setattr(n, jit::stringToSymbol("max"), max);
@@ -4209,53 +4237,50 @@
auto& self_ = unpack(self, "self", 0);
auto& tensor_ = unpack(tensor, "tensor", 1);
std::shared_ptr<DotBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, tensor });
- if (requires_grad) {
+ if (compute_requires_grad({ self, tensor })) {
grad_fn = std::make_shared<DotBackward>();
grad_fn->next_functions = compute_next_functions({ self, tensor });
grad_fn->tensor_ = SavedVariable(tensor, false);
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->dot(self_, tensor_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->dot(self_, tensor_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, tensor )) {
- jit::Node *n = jit::tracer::recordTrace( "dot", { self, tensor }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "dot", { self, tensor }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::tril(const Tensor & self, int64_t diagonal) const {
profiler::RecordFunction profiler("tril");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TrilBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TrilBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->diagonal = diagonal;
}
- auto ret = as_variable(baseType->tril(self_, diagonal));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->tril(self_, diagonal));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "tril", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "tril", { self }, { result } );
setattr(n, jit::stringToSymbol("diagonal"), diagonal);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::tril_(Tensor & self, int64_t diagonal) const {
profiler::RecordFunction profiler("tril_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<TrilBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TrilBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->diagonal = diagonal;
}
baseType->tril_(self_, diagonal);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "tril", { self }, { self } );
setattr(n, jit::stringToSymbol("diagonal"), diagonal);
@@ -4266,34 +4291,32 @@
profiler::RecordFunction profiler("triu");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TriuBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TriuBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->diagonal = diagonal;
}
- auto ret = as_variable(baseType->triu(self_, diagonal));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->triu(self_, diagonal));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "triu", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "triu", { self }, { result } );
setattr(n, jit::stringToSymbol("diagonal"), diagonal);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::triu_(Tensor & self, int64_t diagonal) const {
profiler::RecordFunction profiler("triu_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<TriuBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<TriuBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->diagonal = diagonal;
}
baseType->triu_(self_, diagonal);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "triu", { self }, { self } );
setattr(n, jit::stringToSymbol("diagonal"), diagonal);
@@ -4305,42 +4328,42 @@
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
std::shared_ptr<CrossBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ self, other })) {
grad_fn = std::make_shared<CrossBackward>();
grad_fn->next_functions = compute_next_functions({ self, other });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->dim = dim;
grad_fn->other_ = SavedVariable(other, false);
}
- auto ret = as_variable(baseType->cross(self_, other_, dim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->cross(self_, other_, dim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "cross", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cross", { self, other }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::eye(int64_t n, int64_t m) const {
- return as_variable(baseType->eye(n, m));
+ profiler::RecordFunction profiler("eye");
+ auto result = as_variable(baseType->eye(n, m));
+ return result;
}
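
eye here (and randperm further down) previously returned the wrapped base result in one line; the new bodies open with a profiler::RecordFunction local like every other wrapper, the usual RAII scope-guard shape. The ScopeTimer below is a generic illustration of such a guard, not the real profiler class.

// Generic RAII scope recorder illustrating a function-entry guard; ScopeTimer
// is a hypothetical stand-in, not profiler::RecordFunction.
#include <chrono>
#include <cstdio>

class ScopeTimer {
 public:
  explicit ScopeTimer(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopeTimer() {
    const auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                        std::chrono::steady_clock::now() - start_).count();
    std::printf("%s: %lld us\n", name_, static_cast<long long>(us));
  }
 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

static long long eye_like(long long n) {
  ScopeTimer profiler("eye");   // records from here until the function returns
  long long trace = 0;
  for (long long i = 0; i < n; ++i) trace += 1;  // stand-in for building the identity matrix
  return trace;
}

int main() { return eye_like(1000) == 1000 ? 0 : 1; }
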
Tensor VariableType::diag(const Tensor & self, int64_t diagonal) const {
profiler::RecordFunction profiler("diag");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<DiagBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<DiagBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->diagonal = diagonal;
}
- auto ret = as_variable(baseType->diag(self_, diagonal));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->diag(self_, diagonal));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "diag", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "diag", { self }, { result } );
setattr(n, jit::stringToSymbol("diagonal"), diagonal);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const {
profiler::RecordFunction profiler("addmm");
@@ -4348,8 +4371,7 @@
auto& mat1_ = unpack(mat1, "mat1", 1);
auto& mat2_ = unpack(mat2, "mat2", 2);
std::shared_ptr<AddmmBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, mat1, mat2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, mat1, mat2 })) {
grad_fn = std::make_shared<AddmmBackward>();
grad_fn->next_functions = compute_next_functions({ self, mat1, mat2 });
grad_fn->mat1_sizes = mat1.sizes();
@@ -4359,14 +4381,14 @@
grad_fn->mat2_sizes = mat2.sizes();
grad_fn->beta = beta;
}
- auto ret = as_variable(baseType->s_addmm(self_, mat1_, mat2_, beta, alpha));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_addmm(self_, mat1_, mat2_, beta, alpha));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, mat1, mat2 )) {
- jit::Node *n = jit::tracer::recordTrace( "addmm", { self, mat1, mat2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "addmm", { self, mat1, mat2 }, { result } );
setattr(n, jit::stringToSymbol("beta"), beta);
setattr(n, jit::stringToSymbol("alpha"), alpha);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const {
profiler::RecordFunction profiler("addmm_");
@@ -4375,8 +4397,7 @@
auto& mat2_ = unpack(mat2, "mat2", 2);
check_inplace(self);
std::shared_ptr<AddmmBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, mat1, mat2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, mat1, mat2 })) {
grad_fn = std::make_shared<AddmmBackward>();
grad_fn->next_functions = compute_next_functions({ self, mat1, mat2 });
grad_fn->mat1_sizes = mat1.sizes();
@@ -4388,7 +4409,7 @@
}
baseType->addmm_(self_, mat1_, mat2_, beta, alpha);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, mat1, mat2 )) {
jit::Node *n = jit::tracer::recordTrace( "addmm", { self, mat1, mat2 }, { self } );
setattr(n, jit::stringToSymbol("beta"), beta);
@@ -4402,8 +4423,7 @@
auto& mat_ = unpack(mat, "mat", 1);
auto& vec_ = unpack(vec, "vec", 2);
std::shared_ptr<AddmvBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, mat, vec });
- if (requires_grad) {
+ if (compute_requires_grad({ self, mat, vec })) {
grad_fn = std::make_shared<AddmvBackward>();
grad_fn->next_functions = compute_next_functions({ self, mat, vec });
grad_fn->vec_ = SavedVariable(vec, false);
@@ -4411,14 +4431,14 @@
grad_fn->beta = beta;
grad_fn->mat_ = SavedVariable(mat, false);
}
- auto ret = as_variable(baseType->s_addmv(self_, mat_, vec_, beta, alpha));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_addmv(self_, mat_, vec_, beta, alpha));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, mat, vec )) {
- jit::Node *n = jit::tracer::recordTrace( "addmv", { self, mat, vec }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "addmv", { self, mat, vec }, { result } );
setattr(n, jit::stringToSymbol("beta"), beta);
setattr(n, jit::stringToSymbol("alpha"), alpha);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const {
profiler::RecordFunction profiler("addmv_");
@@ -4427,8 +4447,7 @@
auto& vec_ = unpack(vec, "vec", 2);
check_inplace(self);
std::shared_ptr<AddmvBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, mat, vec });
- if (requires_grad) {
+ if (compute_requires_grad({ self, mat, vec })) {
grad_fn = std::make_shared<AddmvBackward>();
grad_fn->next_functions = compute_next_functions({ self, mat, vec });
grad_fn->vec_ = SavedVariable(vec, false);
@@ -4438,7 +4457,7 @@
}
baseType->addmv_(self_, mat_, vec_, beta, alpha);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, mat, vec )) {
jit::Node *n = jit::tracer::recordTrace( "addmv", { self, mat, vec }, { self } );
setattr(n, jit::stringToSymbol("beta"), beta);
@@ -4452,8 +4471,7 @@
auto& vec1_ = unpack(vec1, "vec1", 1);
auto& vec2_ = unpack(vec2, "vec2", 2);
std::shared_ptr<AddrBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, vec1, vec2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, vec1, vec2 })) {
grad_fn = std::make_shared<AddrBackward>();
grad_fn->next_functions = compute_next_functions({ self, vec1, vec2 });
grad_fn->beta = beta;
@@ -4461,14 +4479,14 @@
grad_fn->alpha = alpha;
grad_fn->vec1_ = SavedVariable(vec1, false);
}
- auto ret = as_variable(baseType->s_addr(self_, vec1_, vec2_, beta, alpha));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_addr(self_, vec1_, vec2_, beta, alpha));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, vec1, vec2 )) {
- jit::Node *n = jit::tracer::recordTrace( "addr", { self, vec1, vec2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "addr", { self, vec1, vec2 }, { result } );
setattr(n, jit::stringToSymbol("beta"), beta);
setattr(n, jit::stringToSymbol("alpha"), alpha);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const {
profiler::RecordFunction profiler("addr_");
@@ -4477,8 +4495,7 @@
auto& vec2_ = unpack(vec2, "vec2", 2);
check_inplace(self);
std::shared_ptr<AddrBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, vec1, vec2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, vec1, vec2 })) {
grad_fn = std::make_shared<AddrBackward>();
grad_fn->next_functions = compute_next_functions({ self, vec1, vec2 });
grad_fn->beta = beta;
@@ -4488,7 +4505,7 @@
}
baseType->addr_(self_, vec1_, vec2_, beta, alpha);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, vec1, vec2 )) {
jit::Node *n = jit::tracer::recordTrace( "addr", { self, vec1, vec2 }, { self } );
setattr(n, jit::stringToSymbol("beta"), beta);
@@ -4501,48 +4518,45 @@
auto& self_ = unpack(self, "self", 0);
auto& vec2_ = unpack(vec2, "vec2", 1);
std::shared_ptr<GerBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, vec2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, vec2 })) {
grad_fn = std::make_shared<GerBackward>();
grad_fn->next_functions = compute_next_functions({ self, vec2 });
grad_fn->vec2_ = SavedVariable(vec2, false);
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->ger(self_, vec2_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->ger(self_, vec2_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, vec2 )) {
- jit::Node *n = jit::tracer::recordTrace( "ger", { self, vec2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "ger", { self, vec2 }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::mv(const Tensor & self, const Tensor & vec) const {
profiler::RecordFunction profiler("mv");
auto& self_ = unpack(self, "self", 0);
auto& vec_ = unpack(vec, "vec", 1);
std::shared_ptr<MvBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, vec });
- if (requires_grad) {
+ if (compute_requires_grad({ self, vec })) {
grad_fn = std::make_shared<MvBackward>();
grad_fn->next_functions = compute_next_functions({ self, vec });
grad_fn->vec_ = SavedVariable(vec, false);
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->mv(self_, vec_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->mv(self_, vec_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, vec )) {
- jit::Node *n = jit::tracer::recordTrace( "mv", { self, vec }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "mv", { self, vec }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::mm(const Tensor & self, const Tensor & mat2) const {
profiler::RecordFunction profiler("mm");
auto& self_ = unpack(self, "self", 0);
auto& mat2_ = unpack(mat2, "mat2", 1);
std::shared_ptr<MmBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, mat2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, mat2 })) {
grad_fn = std::make_shared<MmBackward>();
grad_fn->next_functions = compute_next_functions({ self, mat2 });
grad_fn->self_ = SavedVariable(self, false);
@@ -4550,33 +4564,32 @@
grad_fn->mat2_ = SavedVariable(mat2, false);
grad_fn->self_sizes = self.sizes();
}
- auto ret = as_variable(baseType->mm(self_, mat2_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->mm(self_, mat2_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, mat2 )) {
- jit::Node *n = jit::tracer::recordTrace( "mm", { self, mat2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "mm", { self, mat2 }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::bmm(const Tensor & self, const Tensor & mat2) const {
profiler::RecordFunction profiler("bmm");
auto& self_ = unpack(self, "self", 0);
auto& mat2_ = unpack(mat2, "mat2", 1);
std::shared_ptr<BmmBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, mat2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, mat2 })) {
grad_fn = std::make_shared<BmmBackward>();
grad_fn->next_functions = compute_next_functions({ self, mat2 });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->mat2_ = SavedVariable(mat2, false);
}
- auto ret = as_variable(baseType->bmm(self_, mat2_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->bmm(self_, mat2_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, mat2 )) {
- jit::Node *n = jit::tracer::recordTrace( "bmm", { self, mat2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "bmm", { self, mat2 }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::s_addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const {
profiler::RecordFunction profiler("addbmm");
@@ -4584,8 +4597,7 @@
auto& batch1_ = unpack(batch1, "batch1", 1);
auto& batch2_ = unpack(batch2, "batch2", 2);
std::shared_ptr<AddbmmBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, batch1, batch2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, batch1, batch2 })) {
grad_fn = std::make_shared<AddbmmBackward>();
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 });
grad_fn->batch1_argsize_0 = batch1.size(0);
@@ -4596,14 +4608,14 @@
grad_fn->batch1_ = SavedVariable(batch1, false);
grad_fn->beta = beta;
}
- auto ret = as_variable(baseType->s_addbmm(self_, batch1_, batch2_, beta, alpha));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_addbmm(self_, batch1_, batch2_, beta, alpha));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, batch1, batch2 )) {
- jit::Node *n = jit::tracer::recordTrace( "addbmm", { self, batch1, batch2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "addbmm", { self, batch1, batch2 }, { result } );
setattr(n, jit::stringToSymbol("beta"), beta);
setattr(n, jit::stringToSymbol("alpha"), alpha);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const {
profiler::RecordFunction profiler("addbmm_");
@@ -4612,8 +4624,7 @@
auto& batch2_ = unpack(batch2, "batch2", 2);
check_inplace(self);
std::shared_ptr<AddbmmBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, batch1, batch2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, batch1, batch2 })) {
grad_fn = std::make_shared<AddbmmBackward>();
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 });
grad_fn->batch1_argsize_0 = batch1.size(0);
@@ -4626,7 +4637,7 @@
}
baseType->addbmm_(self_, batch1_, batch2_, beta, alpha);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, batch1, batch2 )) {
jit::Node *n = jit::tracer::recordTrace( "addbmm", { self, batch1, batch2 }, { self } );
setattr(n, jit::stringToSymbol("beta"), beta);
@@ -4640,8 +4651,7 @@
auto& batch1_ = unpack(batch1, "batch1", 1);
auto& batch2_ = unpack(batch2, "batch2", 2);
std::shared_ptr<BaddbmmBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, batch1, batch2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, batch1, batch2 })) {
grad_fn = std::make_shared<BaddbmmBackward>();
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 });
grad_fn->batch2_ = SavedVariable(batch2, false);
@@ -4649,14 +4659,14 @@
grad_fn->batch1_ = SavedVariable(batch1, false);
grad_fn->beta = beta;
}
- auto ret = as_variable(baseType->s_baddbmm(self_, batch1_, batch2_, beta, alpha));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_baddbmm(self_, batch1_, batch2_, beta, alpha));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, batch1, batch2 )) {
- jit::Node *n = jit::tracer::recordTrace( "baddbmm", { self, batch1, batch2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "baddbmm", { self, batch1, batch2 }, { result } );
setattr(n, jit::stringToSymbol("beta"), beta);
setattr(n, jit::stringToSymbol("alpha"), alpha);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const {
profiler::RecordFunction profiler("baddbmm_");
@@ -4665,8 +4675,7 @@
auto& batch2_ = unpack(batch2, "batch2", 2);
check_inplace(self);
std::shared_ptr<BaddbmmBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, batch1, batch2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, batch1, batch2 })) {
grad_fn = std::make_shared<BaddbmmBackward>();
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 });
grad_fn->batch2_ = SavedVariable(batch2, false);
@@ -4676,7 +4685,7 @@
}
baseType->baddbmm_(self_, batch1_, batch2_, beta, alpha);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, batch1, batch2 )) {
jit::Node *n = jit::tracer::recordTrace( "baddbmm", { self, batch1, batch2 }, { self } );
setattr(n, jit::stringToSymbol("beta"), beta);
@@ -4690,21 +4699,20 @@
auto& tensor1_ = unpack(tensor1, "tensor1", 1);
auto& tensor2_ = unpack(tensor2, "tensor2", 2);
std::shared_ptr<AddcmulBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, tensor1, tensor2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, tensor1, tensor2 })) {
grad_fn = std::make_shared<AddcmulBackward>();
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 });
grad_fn->tensor2_ = SavedVariable(tensor2, false);
grad_fn->value = value;
grad_fn->tensor1_ = SavedVariable(tensor1, false);
}
- auto ret = as_variable(baseType->s_addcmul(self_, tensor1_, tensor2_, value));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_addcmul(self_, tensor1_, tensor2_, value));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, tensor1, tensor2 )) {
- jit::Node *n = jit::tracer::recordTrace( "addcmul", { self, tensor1, tensor2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "addcmul", { self, tensor1, tensor2 }, { result } );
setattr(n, jit::stringToSymbol("value"), value);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::s_addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const {
profiler::RecordFunction profiler("addcmul_");
@@ -4713,8 +4721,7 @@
auto& tensor2_ = unpack(tensor2, "tensor2", 2);
check_inplace(self);
std::shared_ptr<AddcmulBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, tensor1, tensor2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, tensor1, tensor2 })) {
grad_fn = std::make_shared<AddcmulBackward>();
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 });
grad_fn->tensor2_ = SavedVariable(tensor2, false);
@@ -4723,7 +4730,7 @@
}
baseType->s_addcmul_(self_, tensor1_, tensor2_, value);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, tensor1, tensor2 )) {
jit::Node *n = jit::tracer::recordTrace( "addcmul", { self, tensor1, tensor2 }, { self } );
setattr(n, jit::stringToSymbol("value"), value);
@@ -4736,21 +4743,20 @@
auto& tensor1_ = unpack(tensor1, "tensor1", 1);
auto& tensor2_ = unpack(tensor2, "tensor2", 2);
std::shared_ptr<AddcdivBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, tensor1, tensor2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, tensor1, tensor2 })) {
grad_fn = std::make_shared<AddcdivBackward>();
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 });
grad_fn->tensor2_ = SavedVariable(tensor2, false);
grad_fn->value = value;
grad_fn->tensor1_ = SavedVariable(tensor1, false);
}
- auto ret = as_variable(baseType->s_addcdiv(self_, tensor1_, tensor2_, value));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->s_addcdiv(self_, tensor1_, tensor2_, value));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, tensor1, tensor2 )) {
- jit::Node *n = jit::tracer::recordTrace( "addcdiv", { self, tensor1, tensor2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "addcdiv", { self, tensor1, tensor2 }, { result } );
setattr(n, jit::stringToSymbol("value"), value);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::s_addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const {
profiler::RecordFunction profiler("addcdiv_");
@@ -4759,8 +4765,7 @@
auto& tensor2_ = unpack(tensor2, "tensor2", 2);
check_inplace(self);
std::shared_ptr<AddcdivBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, tensor1, tensor2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, tensor1, tensor2 })) {
grad_fn = std::make_shared<AddcdivBackward>();
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 });
grad_fn->tensor2_ = SavedVariable(tensor2, false);
@@ -4769,7 +4774,7 @@
}
baseType->s_addcdiv_(self_, tensor1_, tensor2_, value);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, tensor1, tensor2 )) {
jit::Node *n = jit::tracer::recordTrace( "addcdiv", { self, tensor1, tensor2 }, { self } );
setattr(n, jit::stringToSymbol("value"), value);
@@ -4781,49 +4786,48 @@
auto& self_ = unpack(self, "self", 0);
auto& A_ = unpack(A, "A", 1);
std::shared_ptr<GesvBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, A });
- if (requires_grad) {
+ if (compute_requires_grad({ self, A })) {
grad_fn = std::make_shared<GesvBackward>();
grad_fn->next_functions = compute_next_functions({ self, A });
grad_fn->A_ = SavedVariable(A, false);
}
- auto ret = as_variable(baseType->gesv(self_, A_));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor solution, lu;
+ std::tie(solution, lu) = as_variable(baseType->gesv(self_, A_));
+ set_history(solution, grad_fn);
if (jit::tracer::isTracing( self, A )) {
- jit::Node *n = jit::tracer::recordTrace( "gesv", { self, A }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "gesv", { self, A }, { solution, lu } );
(void)n;
}
if (grad_fn) {
- auto& solution = std::get<0>(ret);
grad_fn->solution_ = SavedVariable(solution, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(solution), std::move(lu));
}
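
From gesv onward, the tuple-returning ops stop juggling std::get<0>(ret) and std::get<1>(ret): they declare named outputs, unpack once with std::tie, and rebuild the return value with std::make_tuple. The standalone snippet below shows just that C++ pattern, with toy_solve standing in for the real op.

// Self-contained illustration of the std::tie unpacking pattern used above;
// toy_solve is a stand-in for a tuple-returning op such as gesv.
#include <cstdio>
#include <tuple>
#include <utility>

static std::tuple<double, double> toy_solve() { return {2.5, 0.5}; }

int main() {
  double solution = 0.0, lu = 0.0;            // named outputs instead of an opaque ret
  std::tie(solution, lu) = toy_solve();       // unpack once, then use the names everywhere
  auto out = std::make_tuple(std::move(solution), std::move(lu));  // mirrors the new return
  std::printf("%f %f\n", std::get<0>(out), std::get<1>(out));
}
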
std::tuple<Tensor,Tensor> VariableType::gels(const Tensor & self, const Tensor & A) const {
profiler::RecordFunction profiler("gels");
auto& self_ = unpack(self, "self", 0);
auto& A_ = unpack(A, "A", 1);
std::shared_ptr<GelsBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, A });
- if (requires_grad) {
+ if (compute_requires_grad({ self, A })) {
grad_fn = std::make_shared<GelsBackward>();
grad_fn->next_functions = compute_next_functions({ self, A });
+
}
- auto ret = as_variable(baseType->gels(self_, A_));
- set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn);
+ Tensor res1, res2;
+ std::tie(res1, res2) = as_variable(baseType->gels(self_, A_));
+ set_history({ res1, res2 }, grad_fn);
if (jit::tracer::isTracing( self, A )) {
- jit::Node *n = jit::tracer::recordTrace( "gels", { self, A }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "gels", { self, A }, { res1, res2 } );
(void)n;
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(res1), std::move(res2));
}
std::tuple<Tensor,Tensor> VariableType::trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) const {
profiler::RecordFunction profiler("trtrs");
auto& self_ = unpack(self, "self", 0);
auto& A_ = unpack(A, "A", 1);
std::shared_ptr<TrtrsBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, A });
- if (requires_grad) {
+ if (compute_requires_grad({ self, A })) {
grad_fn = std::make_shared<TrtrsBackward>();
grad_fn->next_functions = compute_next_functions({ self, A });
grad_fn->self_ = SavedVariable(self, false);
@@ -4832,229 +4836,228 @@
grad_fn->transpose = transpose;
grad_fn->unitriangular = unitriangular;
}
- auto ret = as_variable(baseType->trtrs(self_, A_, upper, transpose, unitriangular));
- set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn);
+ Tensor res1, res2;
+ std::tie(res1, res2) = as_variable(baseType->trtrs(self_, A_, upper, transpose, unitriangular));
+ set_history({ res1, res2 }, grad_fn);
if (jit::tracer::isTracing( self, A )) {
- jit::Node *n = jit::tracer::recordTrace( "trtrs", { self, A }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "trtrs", { self, A }, { res1, res2 } );
setattr(n, jit::stringToSymbol("upper"), upper);
setattr(n, jit::stringToSymbol("transpose"), transpose);
setattr(n, jit::stringToSymbol("unitriangular"), unitriangular);
}
if (grad_fn) {
- auto& res1 = std::get<0>(ret);
grad_fn->res1_ = SavedVariable(res1, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(res1), std::move(res2));
}
std::tuple<Tensor,Tensor> VariableType::symeig(const Tensor & self, bool eigenvectors, bool upper) const {
profiler::RecordFunction profiler("symeig");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SymeigBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SymeigBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->symeig(self_, eigenvectors, upper));
- set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn);
+ Tensor res1, res2;
+ std::tie(res1, res2) = as_variable(baseType->symeig(self_, eigenvectors, upper));
+ set_history({ res1, res2 }, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "symeig", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "symeig", { self }, { res1, res2 } );
setattr(n, jit::stringToSymbol("eigenvectors"), eigenvectors);
setattr(n, jit::stringToSymbol("upper"), upper);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(res1), std::move(res2));
}
std::tuple<Tensor,Tensor> VariableType::eig(const Tensor & self, bool eigenvectors) const {
profiler::RecordFunction profiler("eig");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<EigBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<EigBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->eig(self_, eigenvectors));
- set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn);
+ Tensor res1, res2;
+ std::tie(res1, res2) = as_variable(baseType->eig(self_, eigenvectors));
+ set_history({ res1, res2 }, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "eig", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "eig", { self }, { res1, res2 } );
setattr(n, jit::stringToSymbol("eigenvectors"), eigenvectors);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(res1), std::move(res2));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::svd(const Tensor & self, bool some) const {
profiler::RecordFunction profiler("svd");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SvdBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SvdBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->some = some;
}
- auto ret = as_variable(baseType->svd(self_, some));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor res1, res2, res3;
+ std::tie(res1, res2, res3) = as_variable(baseType->svd(self_, some));
+ set_history({ res1, res2, res3 }, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "svd", { self }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "svd", { self }, { res1, res2, res3 } );
setattr(n, jit::stringToSymbol("some"), some);
}
if (grad_fn) {
- auto& res1 = std::get<0>(ret);
grad_fn->res1_ = SavedVariable(res1, true);
- auto& res2 = std::get<1>(ret);
grad_fn->res2_ = SavedVariable(res2, true);
- auto& res3 = std::get<2>(ret);
grad_fn->res3_ = SavedVariable(res3, true);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(res1), std::move(res2), std::move(res3));
}
Tensor VariableType::inverse(const Tensor & self) const {
profiler::RecordFunction profiler("inverse");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<InverseBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<InverseBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->inverse(self_));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->inverse(self_));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "inverse", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "inverse", { self }, { output } );
(void)n;
}
if (grad_fn) {
- auto& output = ret;
grad_fn->output_ = SavedVariable(output, true);
}
- return Tensor(std::move(ret));
+ return output;
}
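
inverse and potrf keep one extra step after set_history: when a grad_fn exists, the freshly produced output is also stashed on it for backward, and because the output is now bound to a name up front, the old auto& output = ret; alias disappears. A hypothetical sketch of that tail of the wrapper follows; Tensor, SavedValue and InverseGrad are simplified stand-ins for the real autograd types.

// Hypothetical sketch of the "stash the forward output on grad_fn" tail shown
// above; every type here is a simplified stand-in.
#include <memory>

struct Tensor { double value = 0.0; bool requires_grad = false; };

struct SavedValue {
  explicit SavedValue(const Tensor& t) : value(t.value) {}
  double value;
};

struct InverseGrad {
  std::unique_ptr<SavedValue> output_;  // backward for an inverse reuses the forward output
};

static Tensor inverse_like(const Tensor& self) {
  std::shared_ptr<InverseGrad> grad_fn;
  if (self.requires_grad) grad_fn = std::make_shared<InverseGrad>();
  Tensor output{1.0 / self.value, self.requires_grad};   // stand-in for baseType->inverse
  // set_history(output, grad_fn) would run here
  if (grad_fn) {
    grad_fn->output_ = std::make_unique<SavedValue>(output);  // no auto& output = ret; alias
  }
  return output;
}

int main() { return inverse_like(Tensor{4.0, true}).value == 0.25 ? 0 : 1; }
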
Tensor VariableType::potrf(const Tensor & self, bool upper) const {
profiler::RecordFunction profiler("potrf");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<PotrfBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<PotrfBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->upper = upper;
}
- auto ret = as_variable(baseType->potrf(self_, upper));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->potrf(self_, upper));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "potrf", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "potrf", { self }, { output } );
setattr(n, jit::stringToSymbol("upper"), upper);
}
if (grad_fn) {
- auto& output = ret;
grad_fn->output_ = SavedVariable(output, true);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::potrs(const Tensor & self, const Tensor & input2, bool upper) const {
profiler::RecordFunction profiler("potrs");
auto& self_ = unpack(self, "self", 0);
auto& input2_ = unpack(input2, "input2", 1);
std::shared_ptr<PotrsBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, input2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, input2 })) {
grad_fn = std::make_shared<PotrsBackward>();
grad_fn->next_functions = compute_next_functions({ self, input2 });
+
}
- auto ret = as_variable(baseType->potrs(self_, input2_, upper));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->potrs(self_, input2_, upper));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, input2 )) {
- jit::Node *n = jit::tracer::recordTrace( "potrs", { self, input2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "potrs", { self, input2 }, { result } );
setattr(n, jit::stringToSymbol("upper"), upper);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::potri(const Tensor & self, bool upper) const {
profiler::RecordFunction profiler("potri");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<PotriBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<PotriBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->potri(self_, upper));
- set_history({ ret }, grad_fn);
+ auto output = as_variable(baseType->potri(self_, upper));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "potri", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "potri", { self }, { output } );
setattr(n, jit::stringToSymbol("upper"), upper);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor> VariableType::pstrf(const Tensor & self, bool upper, Scalar tol) const {
profiler::RecordFunction profiler("pstrf");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<PstrfBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<PstrfBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->pstrf(self_, upper, tol));
- set_history({ std::get<0>(ret) }, grad_fn);
+ Tensor res1, res2;
+ std::tie(res1, res2) = as_variable(baseType->pstrf(self_, upper, tol));
+ set_history({ res1, res2 }, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "pstrf", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "pstrf", { self }, { res1, res2 } );
setattr(n, jit::stringToSymbol("upper"), upper);
setattr(n, jit::stringToSymbol("tol"), tol);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(res1), std::move(res2));
}
std::tuple<Tensor,Tensor> VariableType::qr(const Tensor & self) const {
profiler::RecordFunction profiler("qr");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<QrBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<QrBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->qr(self_));
- set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn);
+ Tensor res1, res2;
+ std::tie(res1, res2) = as_variable(baseType->qr(self_));
+ set_history({ res1, res2 }, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "qr", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "qr", { self }, { res1, res2 } );
(void)n;
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(res1), std::move(res2));
}
std::tuple<Tensor,Tensor> VariableType::geqrf(const Tensor & self) const {
profiler::RecordFunction profiler("geqrf");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<GeqrfBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<GeqrfBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->geqrf(self_));
- set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn);
+ Tensor res1, res2;
+ std::tie(res1, res2) = as_variable(baseType->geqrf(self_));
+ set_history({ res1, res2 }, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "geqrf", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "geqrf", { self }, { res1, res2 } );
(void)n;
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(res1), std::move(res2));
}
Tensor VariableType::orgqr(const Tensor & self, const Tensor & input2) const {
profiler::RecordFunction profiler("orgqr");
auto& self_ = unpack(self, "self", 0);
auto& input2_ = unpack(input2, "input2", 1);
std::shared_ptr<OrgqrBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, input2 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, input2 })) {
grad_fn = std::make_shared<OrgqrBackward>();
grad_fn->next_functions = compute_next_functions({ self, input2 });
+
}
- auto ret = as_variable(baseType->orgqr(self_, input2_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->orgqr(self_, input2_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, input2 )) {
- jit::Node *n = jit::tracer::recordTrace( "orgqr", { self, input2 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "orgqr", { self, input2 }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) const {
profiler::RecordFunction profiler("ormqr");
@@ -5062,53 +5065,55 @@
auto& input2_ = unpack(input2, "input2", 1);
auto& input3_ = unpack(input3, "input3", 2);
std::shared_ptr<OrmqrBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, input2, input3 });
- if (requires_grad) {
+ if (compute_requires_grad({ self, input2, input3 })) {
grad_fn = std::make_shared<OrmqrBackward>();
grad_fn->next_functions = compute_next_functions({ self, input2, input3 });
+
}
- auto ret = as_variable(baseType->ormqr(self_, input2_, input3_, left, transpose));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->ormqr(self_, input2_, input3_, left, transpose));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, input2, input3 )) {
- jit::Node *n = jit::tracer::recordTrace( "ormqr", { self, input2, input3 }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "ormqr", { self, input2, input3 }, { result } );
setattr(n, jit::stringToSymbol("left"), left);
setattr(n, jit::stringToSymbol("transpose"), transpose);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor> VariableType::btrifact(const Tensor & self, bool pivot) const {
profiler::RecordFunction profiler("btrifact");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<BtrifactBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<BtrifactBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->btrifact(self_, pivot));
- set_history({ std::get<0>(ret) }, grad_fn);
+ Tensor result, pivots;
+ std::tie(result, pivots) = as_variable(baseType->btrifact(self_, pivot));
+ set_history({ result, pivots }, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "btrifact", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "btrifact", { self }, { result, pivots } );
setattr(n, jit::stringToSymbol("pivot"), pivot);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result), std::move(pivots));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::btrifact_with_info(const Tensor & self, bool pivot) const {
profiler::RecordFunction profiler("btrifact_with_info");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<BtrifactWithInfoBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<BtrifactWithInfoBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->btrifact_with_info(self_, pivot));
- set_history({ std::get<0>(ret) }, grad_fn);
+ Tensor result, pivots, info;
+ std::tie(result, pivots, info) = as_variable(baseType->btrifact_with_info(self_, pivot));
+ set_history({ result, pivots, info }, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "btrifact_with_info", { self }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "btrifact_with_info", { self }, { result, pivots, info } );
setattr(n, jit::stringToSymbol("pivot"), pivot);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result), std::move(pivots), std::move(info));
}
Tensor VariableType::btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) const {
profiler::RecordFunction profiler("btrisolve");
@@ -5118,31 +5123,33 @@
check_no_requires_grad(LU_data, "LU_data");
check_no_requires_grad(LU_pivots, "LU_pivots");
std::shared_ptr<BtrisolveBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<BtrisolveBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->btrisolve(self_, LU_data_, LU_pivots_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->btrisolve(self_, LU_data_, LU_pivots_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, LU_data, LU_pivots )) {
- jit::Node *n = jit::tracer::recordTrace( "btrisolve", { self, LU_data, LU_pivots }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "btrisolve", { self, LU_data, LU_pivots }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::randperm(int64_t n, Generator * generator) const {
- return as_variable(baseType->randperm(n, generator));
+ profiler::RecordFunction profiler("randperm");
+ auto result = as_variable(baseType->randperm(n, generator));
+ return result;
}
Tensor & VariableType::random_(Tensor & self, int64_t from, int64_t to, Generator * generator) const {
profiler::RecordFunction profiler("random_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<RandomBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RandomBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->random_(self_, from, to, generator);
increment_version(self);
@@ -5146,7 +5153,7 @@
}
baseType->random_(self_, from, to, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
return self;
}
Tensor & VariableType::random_(Tensor & self, int64_t to, Generator * generator) const {
@@ -5154,10 +5161,10 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<RandomBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RandomBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->random_(self_, to, generator);
increment_version(self);
@@ -5161,7 +5168,7 @@
}
baseType->random_(self_, to, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
return self;
}
Tensor & VariableType::random_(Tensor & self, Generator * generator) const {
@@ -5169,10 +5176,10 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<RandomBackward2> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RandomBackward2>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->random_(self_, generator);
increment_version(self);
@@ -5176,31 +5183,24 @@
}
baseType->random_(self_, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
return self;
}
Tensor VariableType::multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) const {
profiler::RecordFunction profiler("multinomial");
auto& self_ = unpack(self, "self", 0);
- std::shared_ptr<MultinomialBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
- grad_fn = std::make_shared<MultinomialBackward>();
- grad_fn->next_functions = compute_next_functions({ self });
- }
- auto ret = as_variable(baseType->multinomial(self_, num_samples, replacement, generator));
- set_history(ret, grad_fn);
- return Tensor(std::move(ret));
+ auto result = as_variable(baseType->multinomial(self_, num_samples, replacement, generator));
+ return result;
}
Tensor & VariableType::uniform_(Tensor & self, double from, double to, Generator * generator) const {
profiler::RecordFunction profiler("uniform_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<UniformBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UniformBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->uniform_(self_, from, to, generator);
increment_version(self);
@@ -5204,62 +5204,59 @@
}
baseType->uniform_(self_, from, to, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
return self;
}
Tensor VariableType::normal(const Tensor & mean, double std, Generator * generator) const {
profiler::RecordFunction profiler("normal");
auto& mean_ = unpack(mean, "mean", 0);
std::shared_ptr<NormalBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ mean });
- if (requires_grad) {
+ if (compute_requires_grad({ mean })) {
grad_fn = std::make_shared<NormalBackward1>();
grad_fn->next_functions = compute_next_functions({ mean });
grad_fn->mean_sizes = mean.sizes();
}
- auto ret = as_variable(baseType->normal(mean_, std, generator));
- set_history(ret, grad_fn);
- return Tensor(std::move(ret));
+ auto output = as_variable(baseType->normal(mean_, std, generator));
+ set_history(output, grad_fn);
+ return output;
}
Tensor VariableType::normal(double mean, const Tensor & std, Generator * generator) const {
profiler::RecordFunction profiler("normal");
auto& std_ = unpack(std, "std", 1);
std::shared_ptr<NormalBackward2> grad_fn;
- auto requires_grad = compute_requires_grad({ std });
- if (requires_grad) {
+ if (compute_requires_grad({ std })) {
grad_fn = std::make_shared<NormalBackward2>();
grad_fn->next_functions = compute_next_functions({ std });
grad_fn->std_sizes = std.sizes();
}
- auto ret = as_variable(baseType->normal(mean, std_, generator));
- set_history(ret, grad_fn);
- return Tensor(std::move(ret));
+ auto output = as_variable(baseType->normal(mean, std_, generator));
+ set_history(output, grad_fn);
+ return output;
}
Tensor VariableType::normal(const Tensor & mean, const Tensor & std, Generator * generator) const {
profiler::RecordFunction profiler("normal");
auto& mean_ = unpack(mean, "mean", 0);
auto& std_ = unpack(std, "std", 1);
std::shared_ptr<NormalBackward3> grad_fn;
- auto requires_grad = compute_requires_grad({ mean, std });
- if (requires_grad) {
+ if (compute_requires_grad({ mean, std })) {
grad_fn = std::make_shared<NormalBackward3>();
grad_fn->next_functions = compute_next_functions({ mean, std });
grad_fn->mean_sizes = mean.sizes();
grad_fn->std_sizes = std.sizes();
}
- auto ret = as_variable(baseType->normal(mean_, std_, generator));
- set_history(ret, grad_fn);
- return Tensor(std::move(ret));
+ auto output = as_variable(baseType->normal(mean_, std_, generator));
+ set_history(output, grad_fn);
+ return output;
}
Tensor & VariableType::normal_(Tensor & self, double mean, double std, Generator * generator) const {
profiler::RecordFunction profiler("normal_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<NormalBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<NormalBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->normal_(self_, mean, std, generator);
increment_version(self);
@@ -5263,7 +5260,7 @@
}
baseType->normal_(self_, mean, std, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
return self;
}
Tensor & VariableType::cauchy_(Tensor & self, double median, double sigma, Generator * generator) const {
@@ -5271,10 +5268,10 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<CauchyBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<CauchyBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->cauchy_(self_, median, sigma, generator);
increment_version(self);
@@ -5278,7 +5275,7 @@
}
baseType->cauchy_(self_, median, sigma, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
return self;
}
Tensor & VariableType::log_normal_(Tensor & self, double mean, double std, Generator * generator) const {
@@ -5286,10 +5283,10 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<LogNormalBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LogNormalBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->log_normal_(self_, mean, std, generator);
increment_version(self);
@@ -5293,7 +5290,7 @@
}
baseType->log_normal_(self_, mean, std, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
return self;
}
Tensor & VariableType::exponential_(Tensor & self, double lambd, Generator * generator) const {
@@ -5301,10 +5298,10 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ExponentialBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ExponentialBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->exponential_(self_, lambd, generator);
increment_version(self);
@@ -5308,24 +5305,28 @@
}
baseType->exponential_(self_, lambd, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
return self;
}
Tensor VariableType::rand(IntList size, Generator * generator) const {
- return as_variable(baseType->rand(size, generator));
+ profiler::RecordFunction profiler("rand");
+ auto result = as_variable(baseType->rand(size, generator));
+ return result;
}
Tensor VariableType::randn(IntList size, Generator * generator) const {
- return as_variable(baseType->randn(size, generator));
+ profiler::RecordFunction profiler("randn");
+ auto result = as_variable(baseType->randn(size, generator));
+ return result;
}
Tensor & VariableType::geometric_(Tensor & self, double p, Generator * generator) const {
profiler::RecordFunction profiler("geometric_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<GeometricBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<GeometricBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->geometric_(self_, p, generator);
increment_version(self);
@@ -5329,81 +5330,118 @@
}
baseType->geometric_(self_, p, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
return self;
}
Tensor VariableType::bernoulli(const Tensor & self, Generator * generator) const {
profiler::RecordFunction profiler("bernoulli");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<BernoulliBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<BernoulliBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->bernoulli(self_, generator));
- set_history(ret, grad_fn);
- return Tensor(std::move(ret));
+ auto output = as_variable(baseType->bernoulli(self_, generator));
+ set_history(output, grad_fn);
+ return output;
}
Tensor VariableType::_standard_gamma(const Tensor & self, Generator * generator) const {
profiler::RecordFunction profiler("_standard_gamma");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<StandardGammaBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<StandardGammaBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->_standard_gamma(self_, generator));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->_standard_gamma(self_, generator));
+ set_history(output, grad_fn);
if (grad_fn) {
- auto& output = ret;
grad_fn->output_ = SavedVariable(output, true);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::tensor(Storage & storage, int64_t storageOffset, IntList size, IntList stride) const {
- return as_variable(baseType->tensor(storage, storageOffset, size, stride));
+ profiler::RecordFunction profiler("tensor");
+ auto result = as_variable(baseType->tensor(storage, storageOffset, size, stride));
+ return result;
}
Tensor VariableType::tensor(IntList size) const {
- return as_variable(baseType->tensor(size));
+ profiler::RecordFunction profiler("tensor");
+ auto result = as_variable(baseType->tensor(size));
+ return result;
}
Tensor VariableType::tensor(IntList size, IntList stride) const {
- return as_variable(baseType->tensor(size, stride));
+ profiler::RecordFunction profiler("tensor");
+ auto result = as_variable(baseType->tensor(size, stride));
+ return result;
}
Tensor VariableType::tensor() const {
- return as_variable(baseType->tensor());
+ profiler::RecordFunction profiler("tensor");
+ auto result = as_variable(baseType->tensor());
+ return result;
}
Tensor VariableType::sparse_coo_tensor(const Tensor & indices, const Tensor & values) const {
- throw std::runtime_error("VariableType::sparse_coo_tensor NYI");
+ profiler::RecordFunction profiler("sparse_coo_tensor");
+ auto& indices_ = unpack(indices, "indices", 0);
+ auto& values_ = unpack(values, "values", 1);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ indices, values })) {
+ grad_fn = std::make_shared<Error>("the derivative for sparse_coo_tensor is not implemented");
+ grad_fn->next_functions = compute_next_functions({ indices, values });
+
+ }
+ auto result = as_variable(baseType->sparse_coo_tensor(indices_, values_));
+ set_history(result, grad_fn);
+ if (jit::tracer::isTracing( indices, values )) {
+ jit::Node *n = jit::tracer::recordTrace( "sparse_coo_tensor", { indices, values }, { result } );
+ (void)n;
+ }
+ return result;
}
Tensor VariableType::alias(const Tensor & self) const {
profiler::RecordFunction profiler("alias");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AliasBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AliasBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->alias(self_));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->alias(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "alias", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "alias", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::_copy_ignoring_overlaps_(Tensor & self, const Tensor & src) const {
- throw std::runtime_error("VariableType::_copy_ignoring_overlaps_ NYI");
+ profiler::RecordFunction profiler("_copy_ignoring_overlaps_");
+ auto& self_ = unpack(self, "self", 0);
+ auto& src_ = unpack(src, "src", 1);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self, src })) {
+ grad_fn = std::make_shared<Error>("the derivative for _copy_ignoring_overlaps_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self, src });
+
+ }
+ baseType->_copy_ignoring_overlaps_(self_, src_);
+ increment_version(self);
+ rebase_history(self, grad_fn);
+ if (jit::tracer::isTracing( self, src )) {
+ jit::Node *n = jit::tracer::recordTrace( "_copy_ignoring_overlaps", { self, src }, { self } );
+ (void)n;
+ }
+ return self;
}
Tensor VariableType::as_strided(const Tensor & self, IntList size, IntList stride, int64_t storage_offset) const {
profiler::RecordFunction profiler("as_strided");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AsStridedBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AsStridedBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_geometry = TensorGeometry(self);
@@ -5411,23 +5449,22 @@
grad_fn->stride = stride;
grad_fn->storage_offset = storage_offset;
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->as_strided(self_, size, stride, storage_offset));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->as_strided(self_, size, stride, storage_offset));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "as_strided", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "as_strided", { self }, { result } );
setattr(n, jit::stringToSymbol("size"), size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("storage_offset"), storage_offset);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::as_strided_(Tensor & self, IntList size, IntList stride, int64_t storage_offset) const {
profiler::RecordFunction profiler("as_strided_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<AsStridedBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AsStridedBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_geometry = TensorGeometry(self);
@@ -5438,7 +5475,7 @@
baseType->as_strided_(self_, size, stride, storage_offset);
ensure_no_aten_scalars(self);
increment_version(self);
- set_history(static_cast<Variable&>(self), grad_fn);
+ set_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "as_strided", { self }, { self } );
setattr(n, jit::stringToSymbol("size"), size);
@@ -5451,59 +5488,101 @@
profiler::RecordFunction profiler("cat");
auto tensors_ = unpack(tensors, "tensors", 0);
std::shared_ptr<CatBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ tensors });
- if (requires_grad) {
+ if (compute_requires_grad({ tensors })) {
grad_fn = std::make_shared<CatBackward>();
grad_fn->next_functions = compute_next_functions({ tensors });
grad_fn->tensors_sizes_dim = to_arg_sizes(tensors, dim);
grad_fn->dim = dim;
}
- auto ret = as_variable(baseType->cat(tensors_, dim));
- set_history(ret, grad_fn);
+ auto self = as_variable(baseType->cat(tensors_, dim));
+ set_history(self, grad_fn);
if (jit::tracer::isTracing( tensors )) {
- jit::Node *n = jit::tracer::recordTrace( "cat", flatten( tensors ), { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cat", flatten( tensors ), { self } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return self;
}
Tensor & VariableType::reshape_(Tensor & self, IntList size, IntList stride) const {
- throw std::runtime_error("VariableType::reshape_ NYI");
+ profiler::RecordFunction profiler("reshape_");
+ auto& self_ = unpack(self, "self", 0);
+ check_inplace(self);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for reshape_ is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
+ baseType->reshape_(self_, size, stride);
+ increment_version(self);
+ rebase_history(self, grad_fn);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "reshape", { self }, { self } );
+ setattr(n, jit::stringToSymbol("size"), size);
+ setattr(n, jit::stringToSymbol("stride"), stride);
+ }
+ return self;
}
Tensor VariableType::_sparse_mask(const Tensor & self, SparseTensor mask) const {
profiler::RecordFunction profiler("_sparse_mask");
auto& self_ = unpack(self, "self", 0);
auto mask_ = unpack(mask, "mask", 1);
std::shared_ptr<SparseMaskBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SparseMaskBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->_sparse_mask(self_, mask_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->_sparse_mask(self_, mask_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "_sparse_mask", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "_sparse_mask", { self }, { result } );
setattr(n, jit::stringToSymbol("mask"), mask);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::_indices(const Tensor & self) const {
+ profiler::RecordFunction profiler("_indices");
auto& self_ = unpack(self, "self", 0);
- return as_variable(baseType->_indices(self_));
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for _indices is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
+ auto result = as_variable(baseType->_indices(self_));
+ set_history(result, grad_fn);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "_indices", { self }, { result } );
+ (void)n;
+ }
+ return result;
}
Tensor VariableType::_values(const Tensor & self) const {
+ profiler::RecordFunction profiler("_values");
auto& self_ = unpack(self, "self", 0);
- return as_variable(baseType->_values(self_));
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for _values is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
+ auto result = as_variable(baseType->_values(self_));
+ set_history(result, grad_fn);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "_values", { self }, { result } );
+ (void)n;
+ }
+ return result;
}
Tensor VariableType::binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("binary_cross_entropy");
- auto ret = Type::binary_cross_entropy(self, target, weight, size_average, reduce);
+ auto output = Type::binary_cross_entropy(self, target, weight, size_average, reduce);
if (jit::tracer::isTracing( self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy", { self, target, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy", { self, target, weight }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::binary_cross_entropy_forward(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("binary_cross_entropy_forward");
@@ -5513,8 +5592,7 @@
check_no_requires_grad(target, "target");
check_no_requires_grad(weight, "weight");
std::shared_ptr<BinaryCrossEntropyBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<BinaryCrossEntropyBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -5523,14 +5601,14 @@
grad_fn->size_average = size_average;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->binary_cross_entropy_forward(self_, target_, weight_, size_average, reduce));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->binary_cross_entropy_forward(self_, target_, weight_, size_average, reduce));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_forward", { self, target, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_forward", { self, target, weight }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("binary_cross_entropy_backward");
@@ -5539,29 +5617,29 @@
auto& target_ = unpack(target, "target", 2);
auto weight_ = unpack_opt(weight, "weight", 3);
std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, target, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, target, weight })) {
grad_fn = std::make_shared<Error>("the derivative for binary_cross_entropy_backward is not implemented");
grad_fn->next_functions = compute_next_functions({ grad_output, self, target, weight });
+
}
- auto ret = as_variable(baseType->binary_cross_entropy_backward(grad_output_, self_, target_, weight_, size_average, reduce));
- set_history({ ret }, grad_fn);
+ auto grad_input = as_variable(baseType->binary_cross_entropy_backward(grad_output_, self_, target_, weight_, size_average, reduce));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_backward", { grad_output, self, target, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_backward", { grad_output, self, target, weight }, { grad_input } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::kl_div(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("kl_div");
- auto ret = Type::kl_div(self, target, size_average, reduce);
+ auto output = Type::kl_div(self, target, size_average, reduce);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "kl_div", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "kl_div", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::kl_div_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("kl_div_forward");
@@ -5569,8 +5647,7 @@
auto& target_ = unpack(target, "target", 1);
check_no_requires_grad(target, "target");
std::shared_ptr<KlDivBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<KlDivBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -5578,14 +5655,14 @@
grad_fn->size_average = size_average;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->kl_div_forward(self_, target_, size_average, reduce));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->kl_div_forward(self_, target_, size_average, reduce));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "kl_div_forward", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "kl_div_forward", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::kl_div_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("kl_div_backward");
@@ -5594,8 +5671,7 @@
auto& target_ = unpack(target, "target", 2);
check_no_requires_grad(target, "target");
std::shared_ptr<KlDivBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<KlDivBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
@@ -5603,24 +5679,24 @@
grad_fn->size_average = size_average;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->kl_div_backward(grad_output_, self_, target_, size_average, reduce));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->kl_div_backward(grad_output_, self_, target_, size_average, reduce));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "kl_div_backward", { grad_output, self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "kl_div_backward", { grad_output, self, target }, { grad_input } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::l1_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("l1_loss");
- auto ret = Type::l1_loss(self, target, size_average, reduce);
+ auto output = Type::l1_loss(self, target, size_average, reduce);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "l1_loss", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "l1_loss", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::l1_loss_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("l1_loss_forward");
@@ -5628,8 +5704,7 @@
auto& target_ = unpack(target, "target", 1);
check_no_requires_grad(target, "target");
std::shared_ptr<L1LossBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<L1LossBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -5637,14 +5712,14 @@
grad_fn->size_average = size_average;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->l1_loss_forward(self_, target_, size_average, reduce));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->l1_loss_forward(self_, target_, size_average, reduce));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "l1_loss_forward", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "l1_loss_forward", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("l1_loss_backward");
@@ -5653,8 +5728,7 @@
auto& target_ = unpack(target, "target", 2);
check_no_requires_grad(target, "target");
std::shared_ptr<L1LossBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<L1LossBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
@@ -5662,24 +5736,24 @@
grad_fn->size_average = size_average;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->l1_loss_backward(grad_output_, self_, target_, size_average, reduce));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->l1_loss_backward(grad_output_, self_, target_, size_average, reduce));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "l1_loss_backward", { grad_output, self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "l1_loss_backward", { grad_output, self, target }, { grad_input } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::mse_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("mse_loss");
- auto ret = Type::mse_loss(self, target, size_average, reduce);
+ auto output = Type::mse_loss(self, target, size_average, reduce);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "mse_loss", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "mse_loss", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::mse_loss_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("mse_loss_forward");
@@ -5687,8 +5761,7 @@
auto& target_ = unpack(target, "target", 1);
check_no_requires_grad(target, "target");
std::shared_ptr<MseLossBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MseLossBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -5696,14 +5769,14 @@
grad_fn->size_average = size_average;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->mse_loss_forward(self_, target_, size_average, reduce));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->mse_loss_forward(self_, target_, size_average, reduce));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "mse_loss_forward", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "mse_loss_forward", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("mse_loss_backward");
@@ -5712,8 +5785,7 @@
auto& target_ = unpack(target, "target", 2);
check_no_requires_grad(target, "target");
std::shared_ptr<MseLossBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<MseLossBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
@@ -5722,25 +5794,25 @@
grad_fn->size_average = size_average;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->mse_loss_backward(grad_output_, self_, target_, size_average, reduce));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->mse_loss_backward(grad_output_, self_, target_, size_average, reduce));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "mse_loss_backward", { grad_output, self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "mse_loss_backward", { grad_output, self, target }, { grad_input } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const {
profiler::RecordFunction profiler("multi_margin_loss");
- auto ret = Type::multi_margin_loss(self, target, p, margin, weight, size_average);
+ auto output = Type::multi_margin_loss(self, target, p, margin, weight, size_average);
if (jit::tracer::isTracing( self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss", { self, target, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss", { self, target, weight }, { output } );
setattr(n, jit::stringToSymbol("p"), p);
setattr(n, jit::stringToSymbol("margin"), margin);
setattr(n, jit::stringToSymbol("size_average"), size_average);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::multi_margin_loss_forward(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const {
profiler::RecordFunction profiler("multi_margin_loss_forward");
@@ -5749,8 +5821,7 @@
auto weight_ = unpack_opt(weight, "weight", 4);
check_no_requires_grad(weight, "weight");
std::shared_ptr<MultiMarginLossBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MultiMarginLossBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -5760,15 +5831,15 @@
grad_fn->weight_ = SavedVariable(weight, false);
grad_fn->size_average = size_average;
}
- auto ret = as_variable(baseType->multi_margin_loss_forward(self_, target_, p, margin, weight_, size_average));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->multi_margin_loss_forward(self_, target_, p, margin, weight_, size_average));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_forward", { self, target, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_forward", { self, target, weight }, { output } );
setattr(n, jit::stringToSymbol("p"), p);
setattr(n, jit::stringToSymbol("margin"), margin);
setattr(n, jit::stringToSymbol("size_average"), size_average);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::multi_margin_loss_backward(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const {
profiler::RecordFunction profiler("multi_margin_loss_backward");
@@ -5776,54 +5847,53 @@
auto& target_ = unpack_long(target, "target", 1);
auto weight_ = unpack_opt(weight, "weight", 4);
std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ self, target, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ self, target, weight })) {
grad_fn = std::make_shared<Error>("the derivative for multi_margin_loss_backward is not implemented");
grad_fn->next_functions = compute_next_functions({ self, target, weight });
+
}
- auto ret = as_variable(baseType->multi_margin_loss_backward(self_, target_, p, margin, weight_, size_average));
- set_history({ ret }, grad_fn);
+ auto grad_input = as_variable(baseType->multi_margin_loss_backward(self_, target_, p, margin, weight_, size_average));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_backward", { self, target, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_backward", { self, target, weight }, { grad_input } );
setattr(n, jit::stringToSymbol("p"), p);
setattr(n, jit::stringToSymbol("margin"), margin);
setattr(n, jit::stringToSymbol("size_average"), size_average);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::multilabel_margin_loss(const Tensor & self, const Tensor & target, bool size_average) const {
profiler::RecordFunction profiler("multilabel_margin_loss");
- auto ret = Type::multilabel_margin_loss(self, target, size_average);
+ auto output = Type::multilabel_margin_loss(self, target, size_average);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor> VariableType::multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, bool size_average) const {
profiler::RecordFunction profiler("multilabel_margin_loss_forward");
auto& self_ = unpack(self, "self", 0);
auto& target_ = unpack_long(target, "target", 1);
std::shared_ptr<MultilabelMarginLossBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MultilabelMarginLossBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->target_ = SavedVariable(target, false);
grad_fn->size_average = size_average;
}
- auto ret = as_variable(baseType->multilabel_margin_loss_forward(self_, target_, size_average));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, is_target;
+ std::tie(output, is_target) = as_variable(baseType->multilabel_margin_loss_forward(self_, target_, size_average));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_forward", { self, target }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_forward", { self, target }, { output, is_target } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
}
if (grad_fn) {
- auto& is_target = std::get<1>(ret);
grad_fn->is_target_ = SavedVariable(is_target, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(is_target));
}
Tensor VariableType::multilabel_margin_loss_backward(const Tensor & self, const Tensor & target, bool size_average, const Tensor & is_target) const {
profiler::RecordFunction profiler("multilabel_margin_loss_backward");
@@ -5831,29 +5901,29 @@
auto& target_ = unpack_long(target, "target", 1);
auto& is_target_ = unpack(is_target, "is_target", 3);
std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ self, target, is_target });
- if (requires_grad) {
+ if (compute_requires_grad({ self, target, is_target })) {
grad_fn = std::make_shared<Error>("the derivative for multilabel_margin_loss_backward is not implemented");
grad_fn->next_functions = compute_next_functions({ self, target, is_target });
+
}
- auto ret = as_variable(baseType->multilabel_margin_loss_backward(self_, target_, size_average, is_target_));
- set_history({ ret }, grad_fn);
+ auto grad_input = as_variable(baseType->multilabel_margin_loss_backward(self_, target_, size_average, is_target_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( self, target, is_target )) {
- jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_backward", { self, target, is_target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_backward", { self, target, is_target }, { grad_input } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const {
profiler::RecordFunction profiler("nll_loss");
- auto ret = Type::nll_loss(self, target, weight, size_average, ignore_index, reduce);
+ auto output = Type::nll_loss(self, target, weight, size_average, ignore_index, reduce);
if (jit::tracer::isTracing( self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "nll_loss", { self, target, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss", { self, target, weight }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor> VariableType::nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const {
profiler::RecordFunction profiler("nll_loss_forward");
@@ -5862,8 +5932,7 @@
auto weight_ = unpack_opt(weight, "weight", 2);
check_no_requires_grad(weight, "weight");
std::shared_ptr<NllLossBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<NllLossBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -5873,19 +5942,19 @@
grad_fn->ignore_index = ignore_index;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->nll_loss_forward(self_, target_, weight_, size_average, ignore_index, reduce));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, total_weight;
+ std::tie(output, total_weight) = as_variable(baseType->nll_loss_forward(self_, target_, weight_, size_average, ignore_index, reduce));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "nll_loss_forward", { self, target, weight }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss_forward", { self, target, weight }, { output, total_weight } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
if (grad_fn) {
- auto& total_weight = std::get<1>(ret);
grad_fn->total_weight_ = SavedVariable(total_weight, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(total_weight));
}
Tensor VariableType::nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const {
profiler::RecordFunction profiler("nll_loss_backward");
@@ -5897,8 +5966,7 @@
check_no_requires_grad(weight, "weight");
check_no_requires_grad(total_weight, "total_weight");
std::shared_ptr<NllLossBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<NllLossBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->target_ = SavedVariable(target, false);
@@ -5907,26 +5975,26 @@
grad_fn->ignore_index = ignore_index;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->nll_loss_backward(grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->nll_loss_backward(grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, target, weight, total_weight )) {
- jit::Node *n = jit::tracer::recordTrace( "nll_loss_backward", { grad_output, self, target, weight, total_weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss_backward", { grad_output, self, target, weight, total_weight }, { grad_input } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const {
profiler::RecordFunction profiler("nll_loss2d");
- auto ret = Type::nll_loss2d(self, target, weight, size_average, ignore_index, reduce);
+ auto output = Type::nll_loss2d(self, target, weight, size_average, ignore_index, reduce);
if (jit::tracer::isTracing( self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "nll_loss2d", { self, target, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss2d", { self, target, weight }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor> VariableType::nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const {
profiler::RecordFunction profiler("nll_loss2d_forward");
@@ -5935,8 +6003,7 @@
auto weight_ = unpack_opt(weight, "weight", 2);
check_no_requires_grad(weight, "weight");
std::shared_ptr<NllLoss2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<NllLoss2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -5946,19 +6013,19 @@
grad_fn->ignore_index = ignore_index;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->nll_loss2d_forward(self_, target_, weight_, size_average, ignore_index, reduce));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, total_weight;
+ std::tie(output, total_weight) = as_variable(baseType->nll_loss2d_forward(self_, target_, weight_, size_average, ignore_index, reduce));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_forward", { self, target, weight }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_forward", { self, target, weight }, { output, total_weight } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
if (grad_fn) {
- auto& total_weight = std::get<1>(ret);
grad_fn->total_weight_ = SavedVariable(total_weight, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(total_weight));
}
Tensor VariableType::nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const {
profiler::RecordFunction profiler("nll_loss2d_backward");
@@ -5970,8 +6037,7 @@
check_no_requires_grad(weight, "weight");
check_no_requires_grad(total_weight, "total_weight");
std::shared_ptr<NllLoss2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<NllLoss2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->target_ = SavedVariable(target, false);
@@ -5980,25 +6046,25 @@
grad_fn->ignore_index = ignore_index;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->nll_loss2d_backward(grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->nll_loss2d_backward(grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, target, weight, total_weight )) {
- jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_backward", { grad_output, self, target, weight, total_weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_backward", { grad_output, self, target, weight, total_weight }, { grad_input } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::smooth_l1_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("smooth_l1_loss");
- auto ret = Type::smooth_l1_loss(self, target, size_average, reduce);
+ auto output = Type::smooth_l1_loss(self, target, size_average, reduce);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::smooth_l1_loss_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("smooth_l1_loss_forward");
@@ -6006,8 +6072,7 @@
auto& target_ = unpack(target, "target", 1);
check_no_requires_grad(target, "target");
std::shared_ptr<SmoothL1LossBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SmoothL1LossBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -6015,14 +6080,14 @@
grad_fn->size_average = size_average;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->smooth_l1_loss_forward(self_, target_, size_average, reduce));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->smooth_l1_loss_forward(self_, target_, size_average, reduce));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_forward", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_forward", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const {
profiler::RecordFunction profiler("smooth_l1_loss_backward");
@@ -6031,8 +6096,7 @@
auto& target_ = unpack(target, "target", 2);
check_no_requires_grad(target, "target");
std::shared_ptr<SmoothL1LossBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<SmoothL1LossBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
@@ -6041,23 +6105,23 @@
grad_fn->size_average = size_average;
grad_fn->reduce = reduce;
}
- auto ret = as_variable(baseType->smooth_l1_loss_backward(grad_output_, self_, target_, size_average, reduce));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->smooth_l1_loss_backward(grad_output_, self_, target_, size_average, reduce));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_backward", { grad_output, self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_backward", { grad_output, self, target }, { grad_input } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
setattr(n, jit::stringToSymbol("reduce"), reduce);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::soft_margin_loss(const Tensor & self, const Tensor & target, bool size_average) const {
profiler::RecordFunction profiler("soft_margin_loss");
- auto ret = Type::soft_margin_loss(self, target, size_average);
+ auto output = Type::soft_margin_loss(self, target, size_average);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::soft_margin_loss_forward(const Tensor & self, const Tensor & target, bool size_average) const {
profiler::RecordFunction profiler("soft_margin_loss_forward");
@@ -6065,21 +6129,20 @@
auto& target_ = unpack(target, "target", 1);
check_no_requires_grad(target, "target");
std::shared_ptr<SoftMarginLossBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SoftMarginLossBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->target_ = SavedVariable(target, false);
grad_fn->size_average = size_average;
}
- auto ret = as_variable(baseType->soft_margin_loss_forward(self_, target_, size_average));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->soft_margin_loss_forward(self_, target_, size_average));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_forward", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_forward", { self, target }, { output } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::soft_margin_loss_backward(const Tensor & self, const Tensor & target, bool size_average) const {
profiler::RecordFunction profiler("soft_margin_loss_backward");
@@ -6087,63 +6150,59 @@
auto& target_ = unpack(target, "target", 1);
check_no_requires_grad(target, "target");
std::shared_ptr<SoftMarginLossBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SoftMarginLossBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->target_ = SavedVariable(target, false);
grad_fn->size_average = size_average;
}
- auto ret = as_variable(baseType->soft_margin_loss_backward(self_, target_, size_average));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->soft_margin_loss_backward(self_, target_, size_average));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( self, target )) {
- jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_backward", { self, target }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_backward", { self, target }, { grad_input } );
setattr(n, jit::stringToSymbol("size_average"), size_average);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::elu(const Tensor & self, Scalar alpha, Scalar scale) const {
profiler::RecordFunction profiler("elu");
- auto ret = Type::elu(self, alpha, scale);
+ auto output = Type::elu(self, alpha, scale);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "elu", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "elu", { self }, { output } );
setattr(n, jit::stringToSymbol("alpha"), alpha);
setattr(n, jit::stringToSymbol("scale"), scale);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::elu_forward(const Tensor & self, Scalar alpha, Scalar scale) const {
profiler::RecordFunction profiler("elu_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<EluBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<EluBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->alpha = alpha;
grad_fn->scale = scale;
}
- auto ret = as_variable(baseType->elu_forward(self_, alpha, scale));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->elu_forward(self_, alpha, scale));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "elu_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "elu_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("alpha"), alpha);
setattr(n, jit::stringToSymbol("scale"), scale);
}
if (grad_fn) {
- auto& output = ret;
grad_fn->output_ = SavedVariable(output, true);
}
- return Tensor(std::move(ret));
+ return output;
}
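// Usage sketch (illustrative only; assumes a VariableType reference `vt`, an input
// Variable `x`, and an upstream gradient `grad`, none of which are defined in this
// file). elu_backward consumes the forward *output*, not the input:
//   Tensor out = vt.elu_forward(x, /*alpha=*/1.0, /*scale=*/1.0);
//   Tensor gin = vt.elu_backward(grad, /*alpha=*/1.0, /*scale=*/1.0, out);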
Tensor VariableType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, const Tensor & output) const {
profiler::RecordFunction profiler("elu_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& output_ = unpack(output, "output", 3);
std::shared_ptr<EluBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, output });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, output })) {
grad_fn = std::make_shared<EluBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, output });
grad_fn->alpha = alpha;
@@ -6151,18 +6210,18 @@
grad_fn->output_ = SavedVariable(output, false);
grad_fn->grad_output_ = SavedVariable(grad_output, false);
}
- auto ret = as_variable(baseType->elu_backward(grad_output_, alpha, scale, output_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->elu_backward(grad_output_, alpha, scale, output_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, output )) {
- jit::Node *n = jit::tracer::recordTrace( "elu_backward", { grad_output, output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "elu_backward", { grad_output, output }, { grad_input } );
setattr(n, jit::stringToSymbol("alpha"), alpha);
setattr(n, jit::stringToSymbol("scale"), scale);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor & VariableType::elu_(Tensor & self, Scalar alpha, Scalar scale) const {
profiler::RecordFunction profiler("elu_");
- auto ret = Type::elu_(self, alpha, scale);
+ Type::elu_(self, alpha, scale);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "elu", { self }, { self } );
setattr(n, jit::stringToSymbol("alpha"), alpha);
@@ -6175,8 +6234,7 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<EluBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<EluBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->alpha = alpha;
@@ -6184,7 +6242,7 @@
}
baseType->elu_forward_(self_, alpha, scale);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "elu_forward", { self }, { self } );
setattr(n, jit::stringToSymbol("alpha"), alpha);
@@ -6197,157 +6255,151 @@
}
Tensor VariableType::glu(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("glu");
- auto ret = Type::glu(self, dim);
+ auto output = Type::glu(self, dim);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "glu", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "glu", { self }, { output } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::glu_forward(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("glu_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<GluBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<GluBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->dim = dim;
}
- auto ret = as_variable(baseType->glu_forward(self_, dim));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->glu_forward(self_, dim));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "glu_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "glu_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("glu_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<GluBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<GluBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->dim = dim;
grad_fn->grad_output_ = SavedVariable(grad_output, false);
}
- auto ret = as_variable(baseType->glu_backward(grad_output_, self_, dim));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->glu_backward(grad_output_, self_, dim));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "glu_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "glu_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::hardshrink(const Tensor & self, Scalar lambd) const {
profiler::RecordFunction profiler("hardshrink");
- auto ret = Type::hardshrink(self, lambd);
+ auto output = Type::hardshrink(self, lambd);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "hardshrink", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "hardshrink", { self }, { output } );
setattr(n, jit::stringToSymbol("lambd"), lambd);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::hardshrink_forward(const Tensor & self, Scalar lambd) const {
profiler::RecordFunction profiler("hardshrink_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<HardshrinkBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<HardshrinkBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->lambd = lambd;
}
- auto ret = as_variable(baseType->hardshrink_forward(self_, lambd));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->hardshrink_forward(self_, lambd));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "hardshrink_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "hardshrink_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("lambd"), lambd);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::hardshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const {
profiler::RecordFunction profiler("hardshrink_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<HardshrinkBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<HardshrinkBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->lambd = lambd;
}
- auto ret = as_variable(baseType->hardshrink_backward(grad_output_, self_, lambd));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->hardshrink_backward(grad_output_, self_, lambd));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "hardshrink_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "hardshrink_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("lambd"), lambd);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const {
profiler::RecordFunction profiler("hardtanh");
- auto ret = Type::hardtanh(self, min_val, max_val);
+ auto output = Type::hardtanh(self, min_val, max_val);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "hardtanh", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "hardtanh", { self }, { output } );
setattr(n, jit::stringToSymbol("min_val"), min_val);
setattr(n, jit::stringToSymbol("max_val"), max_val);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::hardtanh_forward(const Tensor & self, Scalar min_val, Scalar max_val) const {
profiler::RecordFunction profiler("hardtanh_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<HardtanhBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<HardtanhBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->min_val = min_val;
grad_fn->max_val = max_val;
}
- auto ret = as_variable(baseType->hardtanh_forward(self_, min_val, max_val));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->hardtanh_forward(self_, min_val, max_val));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "hardtanh_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "hardtanh_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("min_val"), min_val);
setattr(n, jit::stringToSymbol("max_val"), max_val);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const {
profiler::RecordFunction profiler("hardtanh_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<HardtanhBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<HardtanhBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->min_val = min_val;
grad_fn->max_val = max_val;
}
- auto ret = as_variable(baseType->hardtanh_backward(grad_output_, self_, min_val, max_val));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->hardtanh_backward(grad_output_, self_, min_val, max_val));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "hardtanh_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "hardtanh_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("min_val"), min_val);
setattr(n, jit::stringToSymbol("max_val"), max_val);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor & VariableType::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const {
profiler::RecordFunction profiler("hardtanh_");
- auto ret = Type::hardtanh_(self, min_val, max_val);
+ Type::hardtanh_(self, min_val, max_val);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "hardtanh", { self }, { self } );
setattr(n, jit::stringToSymbol("min_val"), min_val);
@@ -6360,8 +6412,7 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<HardtanhBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<HardtanhBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->min_val = min_val;
@@ -6369,7 +6420,7 @@
}
baseType->hardtanh_forward_(self_, min_val, max_val);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "hardtanh_forward", { self }, { self } );
setattr(n, jit::stringToSymbol("min_val"), min_val);
@@ -6382,55 +6433,53 @@
}
Tensor VariableType::leaky_relu(const Tensor & self, Scalar negative_slope) const {
profiler::RecordFunction profiler("leaky_relu");
- auto ret = Type::leaky_relu(self, negative_slope);
+ auto output = Type::leaky_relu(self, negative_slope);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "leaky_relu", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "leaky_relu", { self }, { output } );
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::leaky_relu_forward(const Tensor & self, Scalar negative_slope) const {
profiler::RecordFunction profiler("leaky_relu_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<LeakyReluBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LeakyReluBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->negative_slope = negative_slope;
}
- auto ret = as_variable(baseType->leaky_relu_forward(self_, negative_slope));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->leaky_relu_forward(self_, negative_slope));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "leaky_relu_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "leaky_relu_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const {
profiler::RecordFunction profiler("leaky_relu_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<LeakyReluBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<LeakyReluBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->negative_slope = negative_slope;
}
- auto ret = as_variable(baseType->leaky_relu_backward(grad_output_, self_, negative_slope));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->leaky_relu_backward(grad_output_, self_, negative_slope));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "leaky_relu_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "leaky_relu_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor & VariableType::leaky_relu_(Tensor & self, Scalar negative_slope) const {
profiler::RecordFunction profiler("leaky_relu_");
- auto ret = Type::leaky_relu_(self, negative_slope);
+ Type::leaky_relu_(self, negative_slope);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "leaky_relu", { self }, { self } );
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope);
@@ -6442,15 +6491,14 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<LeakyReluBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LeakyReluBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->negative_slope = negative_slope;
}
baseType->leaky_relu_forward_(self_, negative_slope);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "leaky_relu_forward", { self }, { self } );
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope);
@@ -6462,34 +6510,33 @@
}
Tensor VariableType::log_sigmoid(const Tensor & self) const {
profiler::RecordFunction profiler("log_sigmoid");
- auto ret = Type::log_sigmoid(self);
+ auto output = Type::log_sigmoid(self);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "log_sigmoid", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "log_sigmoid", { self }, { output } );
(void)n;
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor> VariableType::log_sigmoid_forward(const Tensor & self) const {
profiler::RecordFunction profiler("log_sigmoid_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<LogSigmoidBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LogSigmoidBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->log_sigmoid_forward(self_));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, buffer;
+ std::tie(output, buffer) = as_variable(baseType->log_sigmoid_forward(self_));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_forward", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_forward", { self }, { output, buffer } );
(void)n;
}
if (grad_fn) {
- auto& buffer = std::get<1>(ret);
grad_fn->buffer_ = SavedVariable(buffer, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(buffer));
}
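// Usage sketch (illustrative only; `vt`, `x`, and `grad` are assumed and not defined
// here). log_sigmoid_forward returns both the result and an intermediate buffer, and
// that buffer is what log_sigmoid_backward expects as its third argument:
//   Tensor out, buf;
//   std::tie(out, buf) = vt.log_sigmoid_forward(x);
//   Tensor gin = vt.log_sigmoid_backward(grad, x, buf);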
Tensor VariableType::log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const {
profiler::RecordFunction profiler("log_sigmoid_backward");
@@ -6498,53 +6545,50 @@
auto& buffer_ = unpack(buffer, "buffer", 2);
check_no_requires_grad(buffer, "buffer");
std::shared_ptr<LogSigmoidBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<LogSigmoidBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->buffer_ = SavedVariable(buffer, false);
grad_fn->grad_output_ = SavedVariable(grad_output, false);
}
- auto ret = as_variable(baseType->log_sigmoid_backward(grad_output_, self_, buffer_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->log_sigmoid_backward(grad_output_, self_, buffer_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, buffer )) {
- jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_backward", { grad_output, self, buffer }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_backward", { grad_output, self, buffer }, { grad_input } );
(void)n;
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::log_softmax(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("log_softmax");
- auto ret = Type::log_softmax(self, dim);
+ auto output = Type::log_softmax(self, dim);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "log_softmax", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "log_softmax", { self }, { output } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::log_softmax_forward(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("log_softmax_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<LogSoftmaxBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<LogSoftmaxBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->dim = dim;
}
- auto ret = as_variable(baseType->log_softmax_forward(self_, dim));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->log_softmax_forward(self_, dim));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "log_softmax_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "log_softmax_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
if (grad_fn) {
- auto& output = ret;
grad_fn->output_ = SavedVariable(output, true);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::log_softmax_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const {
profiler::RecordFunction profiler("log_softmax_backward");
@@ -6552,50 +6596,48 @@
auto& self_ = unpack(self, "self", 1);
auto& output_ = unpack(output, "output", 3);
std::shared_ptr<LogSoftmaxBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<LogSoftmaxBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->dim = dim;
grad_fn->output_ = SavedVariable(output, false);
grad_fn->grad_output_ = SavedVariable(grad_output, false);
}
- auto ret = as_variable(baseType->log_softmax_backward(grad_output_, self_, dim, output_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->log_softmax_backward(grad_output_, self_, dim, output_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, output )) {
- jit::Node *n = jit::tracer::recordTrace( "log_softmax_backward", { grad_output, self, output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "log_softmax_backward", { grad_output, self, output }, { grad_input } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::prelu(const Tensor & self, const Tensor & weight) const {
profiler::RecordFunction profiler("prelu");
- auto ret = Type::prelu(self, weight);
+ auto output = Type::prelu(self, weight);
if (jit::tracer::isTracing( self, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "prelu", { self, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "prelu", { self, weight }, { output } );
(void)n;
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::prelu_forward(const Tensor & self, const Tensor & weight) const {
profiler::RecordFunction profiler("prelu_forward");
auto& self_ = unpack(self, "self", 0);
auto& weight_ = unpack(weight, "weight", 1);
std::shared_ptr<PreluBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight })) {
grad_fn = std::make_shared<PreluBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->weight_ = SavedVariable(weight, false);
}
- auto ret = as_variable(baseType->prelu_forward(self_, weight_));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->prelu_forward(self_, weight_));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "prelu_forward", { self, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "prelu_forward", { self, weight }, { output } );
(void)n;
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor> VariableType::prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, std::array<bool,2> output_mask) const {
profiler::RecordFunction profiler("prelu_backward");
@@ -6603,26 +6645,26 @@
auto& self_ = unpack(self, "self", 1);
auto& weight_ = unpack(weight, "weight", 2);
std::shared_ptr<PreluBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, weight })) {
grad_fn = std::make_shared<PreluBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
grad_fn->self_ = SavedVariable(self, false);
grad_fn->weight_ = SavedVariable(weight, false);
}
- auto ret = as_variable(baseType->prelu_backward(grad_output_, self_, weight_, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn);
+ Tensor grad_input, grad_weight;
+ std::tie(grad_input, grad_weight) = as_variable(baseType->prelu_backward(grad_output_, self_, weight_, output_mask));
+ set_history({ grad_input, grad_weight }, grad_fn);
if (jit::tracer::isTracing( grad_output, self, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "prelu_backward", { grad_output, self, weight }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "prelu_backward", { grad_output, self, weight }, { grad_input, grad_weight } );
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_input), std::move(grad_weight));
}
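// Usage sketch (illustrative only; `vt`, `x`, `w`, and `grad` are assumed). The
// output_mask selects which of the two gradients to compute, and the result unpacks
// into (grad_input, grad_weight):
//   Tensor gin, gw;
//   std::tie(gin, gw) = vt.prelu_backward(grad, x, w, {{true, true}});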
Tensor VariableType::rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
profiler::RecordFunction profiler("rrelu_with_noise");
- auto ret = Type::rrelu_with_noise(self, noise, lower, upper, training, generator);
- return Tensor(std::move(ret));
+ auto output = Type::rrelu_with_noise(self, noise, lower, upper, training, generator);
+ return output;
}
Tensor VariableType::rrelu_with_noise_forward(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
profiler::RecordFunction profiler("rrelu_with_noise_forward");
@@ -6630,8 +6672,7 @@
auto& noise_ = unpack(noise, "noise", 1);
check_no_requires_grad(noise, "noise");
std::shared_ptr<RreluWithNoiseBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RreluWithNoiseBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -6640,9 +6681,9 @@
grad_fn->upper = upper;
grad_fn->training = training;
}
- auto ret = as_variable(baseType->rrelu_with_noise_forward(self_, noise_, lower, upper, training, generator));
- set_history(ret, grad_fn);
- return Tensor(std::move(ret));
+ auto output = as_variable(baseType->rrelu_with_noise_forward(self_, noise_, lower, upper, training, generator));
+ set_history(output, grad_fn);
+ return output;
}
Tensor VariableType::rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const {
profiler::RecordFunction profiler("rrelu_with_noise_backward");
@@ -6651,8 +6692,7 @@
auto& noise_ = unpack(noise, "noise", 2);
check_no_requires_grad(noise, "noise");
std::shared_ptr<RreluWithNoiseBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<RreluWithNoiseBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
@@ -6661,19 +6701,19 @@
grad_fn->upper = upper;
grad_fn->training = training;
}
- auto ret = as_variable(baseType->rrelu_with_noise_backward(grad_output_, self_, noise_, lower, upper, training));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->rrelu_with_noise_backward(grad_output_, self_, noise_, lower, upper, training));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, noise )) {
- jit::Node *n = jit::tracer::recordTrace( "rrelu_with_noise_backward", { grad_output, self, noise }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "rrelu_with_noise_backward", { grad_output, self, noise }, { grad_input } );
setattr(n, jit::stringToSymbol("lower"), lower);
setattr(n, jit::stringToSymbol("upper"), upper);
setattr(n, jit::stringToSymbol("training"), training);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor & VariableType::rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
profiler::RecordFunction profiler("rrelu_with_noise_");
- auto ret = Type::rrelu_with_noise_(self, noise, lower, upper, training, generator);
+ Type::rrelu_with_noise_(self, noise, lower, upper, training, generator);
return self;
}
Tensor & VariableType::rrelu_with_noise_forward_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const {
@@ -6683,8 +6723,7 @@
check_inplace(self);
check_no_requires_grad(noise, "noise");
std::shared_ptr<RreluWithNoiseBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<RreluWithNoiseBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->noise_ = SavedVariable(noise, false);
@@ -6694,7 +6733,7 @@
}
baseType->rrelu_with_noise_forward_(self_, noise_, lower, upper, training, generator);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (grad_fn) {
grad_fn->output_ = SavedVariable(self, true);
}
@@ -6702,35 +6741,33 @@
}
Tensor VariableType::softmax(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("softmax");
- auto ret = Type::softmax(self, dim);
+ auto output = Type::softmax(self, dim);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "softmax", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "softmax", { self }, { output } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::softmax_forward(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("softmax_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SoftmaxBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SoftmaxBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->dim = dim;
}
- auto ret = as_variable(baseType->softmax_forward(self_, dim));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->softmax_forward(self_, dim));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "softmax_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "softmax_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
if (grad_fn) {
- auto& output = ret;
grad_fn->output_ = SavedVariable(output, true);
}
- return Tensor(std::move(ret));
+ return output;
}
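// Usage sketch (illustrative only; `vt`, `x`, and `grad` are assumed). Like the other
// *_backward overloads that save their forward result, softmax_backward takes the
// forward output as its last argument:
//   Tensor out = vt.softmax_forward(x, /*dim=*/1);
//   Tensor gin = vt.softmax_backward(grad, x, /*dim=*/1, out);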
Tensor VariableType::softmax_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const {
profiler::RecordFunction profiler("softmax_backward");
@@ -6738,8 +6775,7 @@
auto& self_ = unpack(self, "self", 1);
auto& output_ = unpack(output, "output", 3);
std::shared_ptr<SoftmaxBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<SoftmaxBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
@@ -6747,48 +6783,46 @@
grad_fn->output_ = SavedVariable(output, false);
grad_fn->grad_output_ = SavedVariable(grad_output, false);
}
- auto ret = as_variable(baseType->softmax_backward(grad_output_, self_, dim, output_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->softmax_backward(grad_output_, self_, dim, output_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, output )) {
- jit::Node *n = jit::tracer::recordTrace( "softmax_backward", { grad_output, self, output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "softmax_backward", { grad_output, self, output }, { grad_input } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::softplus(const Tensor & self, Scalar beta, Scalar threshold) const {
profiler::RecordFunction profiler("softplus");
- auto ret = Type::softplus(self, beta, threshold);
+ auto output = Type::softplus(self, beta, threshold);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "softplus", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "softplus", { self }, { output } );
setattr(n, jit::stringToSymbol("beta"), beta);
setattr(n, jit::stringToSymbol("threshold"), threshold);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::softplus_forward(const Tensor & self, Scalar beta, Scalar threshold) const {
profiler::RecordFunction profiler("softplus_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SoftplusBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SoftplusBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->beta = beta;
grad_fn->threshold = threshold;
}
- auto ret = as_variable(baseType->softplus_forward(self_, beta, threshold));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->softplus_forward(self_, beta, threshold));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "softplus_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "softplus_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("beta"), beta);
setattr(n, jit::stringToSymbol("threshold"), threshold);
}
if (grad_fn) {
- auto& output = ret;
grad_fn->output_ = SavedVariable(output, true);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const {
profiler::RecordFunction profiler("softplus_backward");
@@ -6796,8 +6830,7 @@
auto& self_ = unpack(self, "self", 1);
auto& output_ = unpack(output, "output", 4);
std::shared_ptr<SoftplusBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<SoftplusBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
@@ -6806,119 +6839,115 @@
grad_fn->output_ = SavedVariable(output, false);
grad_fn->grad_output_ = SavedVariable(grad_output, false);
}
- auto ret = as_variable(baseType->softplus_backward(grad_output_, self_, beta, threshold, output_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->softplus_backward(grad_output_, self_, beta, threshold, output_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, output )) {
- jit::Node *n = jit::tracer::recordTrace( "softplus_backward", { grad_output, self, output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "softplus_backward", { grad_output, self, output }, { grad_input } );
setattr(n, jit::stringToSymbol("beta"), beta);
setattr(n, jit::stringToSymbol("threshold"), threshold);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::softshrink(const Tensor & self, Scalar lambd) const {
profiler::RecordFunction profiler("softshrink");
- auto ret = Type::softshrink(self, lambd);
+ auto output = Type::softshrink(self, lambd);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "softshrink", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "softshrink", { self }, { output } );
setattr(n, jit::stringToSymbol("lambd"), lambd);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::softshrink_forward(const Tensor & self, Scalar lambd) const {
profiler::RecordFunction profiler("softshrink_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SoftshrinkBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SoftshrinkBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->lambd = lambd;
}
- auto ret = as_variable(baseType->softshrink_forward(self_, lambd));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->softshrink_forward(self_, lambd));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "softshrink_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "softshrink_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("lambd"), lambd);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const {
profiler::RecordFunction profiler("softshrink_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<SoftshrinkBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<SoftshrinkBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->lambd = lambd;
}
- auto ret = as_variable(baseType->softshrink_backward(grad_output_, self_, lambd));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->softshrink_backward(grad_output_, self_, lambd));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "softshrink_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "softshrink_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("lambd"), lambd);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::threshold(const Tensor & self, Scalar threshold, Scalar value) const {
profiler::RecordFunction profiler("threshold");
- auto ret = Type::threshold(self, threshold, value);
+ auto output = Type::threshold(self, threshold, value);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "threshold", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "threshold", { self }, { output } );
setattr(n, jit::stringToSymbol("threshold"), threshold);
setattr(n, jit::stringToSymbol("value"), value);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::threshold_forward(const Tensor & self, Scalar threshold, Scalar value) const {
profiler::RecordFunction profiler("threshold_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ThresholdBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ThresholdBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->threshold = threshold;
grad_fn->value = value;
}
- auto ret = as_variable(baseType->threshold_forward(self_, threshold, value));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->threshold_forward(self_, threshold, value));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "threshold_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "threshold_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("threshold"), threshold);
setattr(n, jit::stringToSymbol("value"), value);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::threshold_backward(const Tensor & grad_output, const Tensor & self, Scalar threshold, Scalar value) const {
profiler::RecordFunction profiler("threshold_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<ThresholdBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<ThresholdBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->threshold = threshold;
grad_fn->value = value;
}
- auto ret = as_variable(baseType->threshold_backward(grad_output_, self_, threshold, value));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->threshold_backward(grad_output_, self_, threshold, value));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "threshold_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "threshold_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("threshold"), threshold);
setattr(n, jit::stringToSymbol("value"), value);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor & VariableType::threshold_(Tensor & self, Scalar threshold, Scalar value) const {
profiler::RecordFunction profiler("threshold_");
- auto ret = Type::threshold_(self, threshold, value);
+ Type::threshold_(self, threshold, value);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "threshold", { self }, { self } );
setattr(n, jit::stringToSymbol("threshold"), threshold);
@@ -6931,8 +6960,7 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<ThresholdBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ThresholdBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->threshold = threshold;
@@ -6940,7 +6968,7 @@
}
baseType->threshold_forward_(self_, threshold, value);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "threshold_forward", { self }, { self } );
setattr(n, jit::stringToSymbol("threshold"), threshold);
@@ -6953,128 +6981,124 @@
}
Tensor VariableType::adaptive_avg_pool2d(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_avg_pool2d");
- auto ret = Type::adaptive_avg_pool2d(self, output_size);
+ auto output = Type::adaptive_avg_pool2d(self, output_size);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::adaptive_avg_pool2d_forward(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_avg_pool2d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AdaptiveAvgPool2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AdaptiveAvgPool2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->adaptive_avg_pool2d_forward(self_, output_size));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->adaptive_avg_pool2d_forward(self_, output_size));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const {
profiler::RecordFunction profiler("adaptive_avg_pool2d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<AdaptiveAvgPool2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<AdaptiveAvgPool2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->adaptive_avg_pool2d_backward(grad_output_, self_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->adaptive_avg_pool2d_backward(grad_output_, self_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_backward", { grad_output, self }, { grad_input } );
(void)n;
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::adaptive_avg_pool3d(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_avg_pool3d");
- auto ret = Type::adaptive_avg_pool3d(self, output_size);
+ auto output = Type::adaptive_avg_pool3d(self, output_size);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::adaptive_avg_pool3d_forward(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_avg_pool3d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AdaptiveAvgPool3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AdaptiveAvgPool3DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->adaptive_avg_pool3d_forward(self_, output_size));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->adaptive_avg_pool3d_forward(self_, output_size));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) const {
profiler::RecordFunction profiler("adaptive_avg_pool3d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<AdaptiveAvgPool3DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<AdaptiveAvgPool3DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->adaptive_avg_pool3d_backward(grad_output_, self_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->adaptive_avg_pool3d_backward(grad_output_, self_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_backward", { grad_output, self }, { grad_input } );
(void)n;
}
- return Tensor(std::move(ret));
+ return grad_input;
}
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool2d(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_max_pool2d");
- auto ret = Type::adaptive_max_pool2d(self, output_size);
+ Tensor output, indices;
+ std::tie(output, indices) = Type::adaptive_max_pool2d(self, output_size);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d", { self }, { output, indices } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool2d_forward(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_max_pool2d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AdaptiveMaxPool2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AdaptiveMaxPool2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->adaptive_max_pool2d_forward(self_, output_size));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, indices;
+ std::tie(output, indices) = as_variable(baseType->adaptive_max_pool2d_forward(self_, output_size));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_forward", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_forward", { self }, { output, indices } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
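// Usage sketch (illustrative only; `vt`, `x`, and `grad` are assumed). The second
// element of the forward result holds the argmax indices, which the backward pass
// reuses:
//   Tensor out, idx;
//   std::tie(out, idx) = vt.adaptive_max_pool2d_forward(x, {7, 7});
//   Tensor gin = vt.adaptive_max_pool2d_backward(grad, x, idx);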
Tensor VariableType::adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const {
profiler::RecordFunction profiler("adaptive_max_pool2d_backward");
@@ -7082,52 +7106,51 @@
auto& self_ = unpack(self, "self", 1);
auto& indices_ = unpack_long(indices, "indices", 2);
std::shared_ptr<AdaptiveMaxPool2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<AdaptiveMaxPool2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->indices_ = SavedVariable(indices, false);
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->adaptive_max_pool2d_backward(grad_output_, self_, indices_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->adaptive_max_pool2d_backward(grad_output_, self_, indices_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_backward", { grad_output, self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_backward", { grad_output, self, indices }, { grad_input } );
(void)n;
}
- return Tensor(std::move(ret));
+ return grad_input;
}
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool3d(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_max_pool3d");
- auto ret = Type::adaptive_max_pool3d(self, output_size);
+ Tensor output, indices;
+ std::tie(output, indices) = Type::adaptive_max_pool3d(self, output_size);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d", { self }, { output, indices } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool3d_forward(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_max_pool3d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AdaptiveMaxPool3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AdaptiveMaxPool3DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->adaptive_max_pool3d_forward(self_, output_size));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, indices;
+ std::tie(output, indices) = as_variable(baseType->adaptive_max_pool3d_forward(self_, output_size));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_forward", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_forward", { self }, { output, indices } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
Tensor VariableType::adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const {
profiler::RecordFunction profiler("adaptive_max_pool3d_backward");
@@ -7135,41 +7158,39 @@
auto& self_ = unpack(self, "self", 1);
auto& indices_ = unpack_long(indices, "indices", 2);
std::shared_ptr<AdaptiveMaxPool3DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<AdaptiveMaxPool3DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->indices_ = SavedVariable(indices, false);
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->adaptive_max_pool3d_backward(grad_output_, self_, indices_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->adaptive_max_pool3d_backward(grad_output_, self_, indices_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_backward", { grad_output, self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_backward", { grad_output, self, indices }, { grad_input } );
(void)n;
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::avg_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const {
profiler::RecordFunction profiler("avg_pool2d");
- auto ret = Type::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ auto output = Type::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "avg_pool2d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool2d", { self }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::avg_pool2d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const {
profiler::RecordFunction profiler("avg_pool2d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AvgPool2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AvgPool2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -7179,25 +7200,24 @@
grad_fn->ceil_mode = ceil_mode;
grad_fn->count_include_pad = count_include_pad;
}
- auto ret = as_variable(baseType->avg_pool2d_forward(self_, kernel_size, stride, padding, ceil_mode, count_include_pad));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->avg_pool2d_forward(self_, kernel_size, stride, padding, ceil_mode, count_include_pad));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const {
profiler::RecordFunction profiler("avg_pool2d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<AvgPool2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<AvgPool2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->kernel_size = kernel_size;
@@ -7207,37 +7227,36 @@
grad_fn->count_include_pad = count_include_pad;
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->avg_pool2d_backward(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->avg_pool2d_backward(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::avg_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const {
profiler::RecordFunction profiler("avg_pool3d");
- auto ret = Type::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ auto output = Type::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "avg_pool3d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool3d", { self }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::avg_pool3d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const {
profiler::RecordFunction profiler("avg_pool3d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AvgPool3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<AvgPool3DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -7247,25 +7266,24 @@
grad_fn->ceil_mode = ceil_mode;
grad_fn->count_include_pad = count_include_pad;
}
- auto ret = as_variable(baseType->avg_pool3d_forward(self_, kernel_size, stride, padding, ceil_mode, count_include_pad));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->avg_pool3d_forward(self_, kernel_size, stride, padding, ceil_mode, count_include_pad));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const {
profiler::RecordFunction profiler("avg_pool3d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<AvgPool3DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<AvgPool3DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->kernel_size = kernel_size;
@@ -7275,27 +7293,28 @@
grad_fn->count_include_pad = count_include_pad;
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->avg_pool3d_backward(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->avg_pool3d_backward(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
std::tuple<Tensor,Tensor> VariableType::fractional_max_pool2d(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) const {
profiler::RecordFunction profiler("fractional_max_pool2d");
- auto ret = Type::fractional_max_pool2d(self, kernel_size, output_size, random_samples);
+ Tensor output, indices;
+ std::tie(output, indices) = Type::fractional_max_pool2d(self, kernel_size, output_size, random_samples);
if (jit::tracer::isTracing( self, random_samples )) {
- jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d", { self, random_samples }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d", { self, random_samples }, { output, indices } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
std::tuple<Tensor,Tensor> VariableType::fractional_max_pool2d_forward(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) const {
profiler::RecordFunction profiler("fractional_max_pool2d_forward");
@@ -7303,26 +7322,25 @@
auto& random_samples_ = unpack(random_samples, "random_samples", 3);
check_no_requires_grad(random_samples, "random_samples");
std::shared_ptr<FractionalMaxPool2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<FractionalMaxPool2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->kernel_size = kernel_size;
grad_fn->output_size = output_size;
}
- auto ret = as_variable(baseType->fractional_max_pool2d_forward(self_, kernel_size, output_size, random_samples_));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, indices;
+ std::tie(output, indices) = as_variable(baseType->fractional_max_pool2d_forward(self_, kernel_size, output_size, random_samples_));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, random_samples )) {
- jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_forward", { self, random_samples }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_forward", { self, random_samples }, { output, indices } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
Tensor VariableType::fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) const {
profiler::RecordFunction profiler("fractional_max_pool2d_backward");
@@ -7330,42 +7348,41 @@
auto& self_ = unpack(self, "self", 1);
auto& indices_ = unpack_long(indices, "indices", 4);
std::shared_ptr<FractionalMaxPool2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<FractionalMaxPool2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->indices_ = SavedVariable(indices, false);
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->fractional_max_pool2d_backward(grad_output_, self_, kernel_size, output_size, indices_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->fractional_max_pool2d_backward(grad_output_, self_, kernel_size, output_size, indices_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_backward", { grad_output, self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_backward", { grad_output, self, indices }, { grad_input } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
std::tuple<Tensor,Tensor> VariableType::max_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const {
profiler::RecordFunction profiler("max_pool2d");
- auto ret = Type::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
+ Tensor output, indices;
+ std::tie(output, indices) = Type::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "max_pool2d", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "max_pool2d", { self }, { output, indices } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
std::tuple<Tensor,Tensor> VariableType::max_pool2d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const {
profiler::RecordFunction profiler("max_pool2d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MaxPool2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MaxPool2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -7375,10 +7392,11 @@
grad_fn->dilation = dilation;
grad_fn->ceil_mode = ceil_mode;
}
- auto ret = as_variable(baseType->max_pool2d_forward(self_, kernel_size, stride, padding, dilation, ceil_mode));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, indices;
+ std::tie(output, indices) = as_variable(baseType->max_pool2d_forward(self_, kernel_size, stride, padding, dilation, ceil_mode));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "max_pool2d_forward", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "max_pool2d_forward", { self }, { output, indices } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
@@ -7386,10 +7404,9 @@
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
Tensor VariableType::max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const {
profiler::RecordFunction profiler("max_pool2d_backward");
@@ -7397,44 +7414,43 @@
auto& self_ = unpack(self, "self", 1);
auto& indices_ = unpack_long(indices, "indices", 7);
std::shared_ptr<MaxPool2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<MaxPool2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->indices_ = SavedVariable(indices, false);
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->max_pool2d_backward(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->max_pool2d_backward(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "max_pool2d_backward", { grad_output, self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max_pool2d_backward", { grad_output, self, indices }, { grad_input } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
std::tuple<Tensor,Tensor> VariableType::max_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const {
profiler::RecordFunction profiler("max_pool3d");
- auto ret = Type::max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode);
+ Tensor output, indices;
+ std::tie(output, indices) = Type::max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "max_pool3d", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "max_pool3d", { self }, { output, indices } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
std::tuple<Tensor,Tensor> VariableType::max_pool3d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const {
profiler::RecordFunction profiler("max_pool3d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MaxPool3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MaxPool3DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -7444,10 +7460,11 @@
grad_fn->dilation = dilation;
grad_fn->ceil_mode = ceil_mode;
}
- auto ret = as_variable(baseType->max_pool3d_forward(self_, kernel_size, stride, padding, dilation, ceil_mode));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, indices;
+ std::tie(output, indices) = as_variable(baseType->max_pool3d_forward(self_, kernel_size, stride, padding, dilation, ceil_mode));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "max_pool3d_forward", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "max_pool3d_forward", { self }, { output, indices } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
@@ -7455,10 +7472,9 @@
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
}
if (grad_fn) {
- auto& indices = std::get<1>(ret);
grad_fn->indices_ = SavedVariable(indices, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(indices));
}
Tensor VariableType::max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const {
profiler::RecordFunction profiler("max_pool3d_backward");
@@ -7466,52 +7482,51 @@
auto& self_ = unpack(self, "self", 1);
auto& indices_ = unpack_long(indices, "indices", 7);
std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, indices });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, indices })) {
grad_fn = std::make_shared<Error>("the derivative for max_pool3d_backward is not implemented");
grad_fn->next_functions = compute_next_functions({ grad_output, self, indices });
+
}
- auto ret = as_variable(baseType->max_pool3d_backward(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_));
- set_history({ ret }, grad_fn);
+ auto grad_input = as_variable(baseType->max_pool3d_backward(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "max_pool3d_backward", { grad_output, self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max_pool3d_backward", { grad_output, self, indices }, { grad_input } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
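// ---------------------------------------------------------------------------
// A minimal sketch of the Error grad_fn pattern used for max_pool3d_backward
// above, where the double backward is not implemented: when gradients are
// required, a node is installed whose only job is to throw if anyone later
// differentiates through this op.  ErrorNode and maybe_error_node are
// stand-ins invented for this sketch; the real Error function is defined in
// the autograd sources.
// ---------------------------------------------------------------------------
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

struct ErrorNode {
  explicit ErrorNode(std::string msg) : msg_(std::move(msg)) {}
  // Running backward through this node is a hard error by construction.
  void apply() const { throw std::runtime_error(msg_); }
  std::string msg_;
};

// Mirrors the generated wrapper: the error node is created eagerly in the
// forward pass, but it only fires if the graph is actually walked through it.
static std::shared_ptr<ErrorNode> maybe_error_node(bool needs_grad) {
  std::shared_ptr<ErrorNode> grad_fn;
  if (needs_grad) {
    grad_fn = std::make_shared<ErrorNode>(
        "the derivative for max_pool3d_backward is not implemented");
  }
  return grad_fn;
}

int main() {
  auto grad_fn = maybe_error_node(/*needs_grad=*/true);
  try {
    if (grad_fn) grad_fn->apply();  // simulates a second differentiation
  } catch (const std::runtime_error& e) {
    std::cout << e.what() << "\n";
  }
  return 0;
}
// ---------------------------------------------------------------------------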
Tensor VariableType::max_unpool2d(const Tensor & self, const Tensor & indices, IntList output_size) const {
profiler::RecordFunction profiler("max_unpool2d");
- auto ret = Type::max_unpool2d(self, indices, output_size);
+ auto output = Type::max_unpool2d(self, indices, output_size);
if (jit::tracer::isTracing( self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "max_unpool2d", { self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool2d", { self, indices }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::max_unpool2d_forward(const Tensor & self, const Tensor & indices, IntList output_size) const {
profiler::RecordFunction profiler("max_unpool2d_forward");
auto& self_ = unpack(self, "self", 0);
auto& indices_ = unpack_long(indices, "indices", 1);
std::shared_ptr<MaxUnpool2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MaxUnpool2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->indices_ = SavedVariable(indices, false);
grad_fn->output_size = output_size;
}
- auto ret = as_variable(baseType->max_unpool2d_forward(self_, indices_, output_size));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->max_unpool2d_forward(self_, indices_, output_size));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_forward", { self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_forward", { self, indices }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) const {
profiler::RecordFunction profiler("max_unpool2d_backward");
@@ -7519,40 +7534,38 @@
auto& self_ = unpack(self, "self", 1);
auto& indices_ = unpack_long(indices, "indices", 2);
std::shared_ptr<MaxUnpool2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<MaxUnpool2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->indices_ = SavedVariable(indices, false);
grad_fn->output_size = output_size;
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->max_unpool2d_backward(grad_output_, self_, indices_, output_size));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->max_unpool2d_backward(grad_output_, self_, indices_, output_size));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_backward", { grad_output, self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_backward", { grad_output, self, indices }, { grad_input } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::max_unpool3d(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const {
profiler::RecordFunction profiler("max_unpool3d");
- auto ret = Type::max_unpool3d(self, indices, output_size, stride, padding);
+ auto output = Type::max_unpool3d(self, indices, output_size, stride, padding);
if (jit::tracer::isTracing( self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "max_unpool3d", { self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool3d", { self, indices }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::max_unpool3d_forward(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const {
profiler::RecordFunction profiler("max_unpool3d_forward");
auto& self_ = unpack(self, "self", 0);
auto& indices_ = unpack_long(indices, "indices", 1);
std::shared_ptr<MaxUnpool3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<MaxUnpool3DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
@@ -7561,15 +7574,15 @@
grad_fn->stride = stride;
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->max_unpool3d_forward(self_, indices_, output_size, stride, padding));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->max_unpool3d_forward(self_, indices_, output_size, stride, padding));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_forward", { self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_forward", { self, indices }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const {
profiler::RecordFunction profiler("max_unpool3d_backward");
@@ -7577,617 +7590,621 @@
auto& self_ = unpack(self, "self", 1);
auto& indices_ = unpack_long(indices, "indices", 2);
std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, indices });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, indices })) {
grad_fn = std::make_shared<Error>("the derivative for max_unpool3d_backward is not implemented");
grad_fn->next_functions = compute_next_functions({ grad_output, self, indices });
+
}
- auto ret = as_variable(baseType->max_unpool3d_backward(grad_output_, self_, indices_, output_size, stride, padding));
- set_history({ ret }, grad_fn);
+ auto grad_input = as_variable(baseType->max_unpool3d_backward(grad_output_, self_, indices_, output_size, stride, padding));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_backward", { grad_output, self, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_backward", { grad_output, self, indices }, { grad_input } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::reflection_pad1d(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("reflection_pad1d");
- auto ret = Type::reflection_pad1d(self, padding);
+ auto output = Type::reflection_pad1d(self, padding);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::reflection_pad1d_forward(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("reflection_pad1d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ReflectionPad1DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ReflectionPad1DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->reflection_pad1d_forward(self_, padding));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->reflection_pad1d_forward(self_, padding));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("reflection_pad1d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<ReflectionPad1DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<ReflectionPad1DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->padding = padding;
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->reflection_pad1d_backward(grad_output_, self_, padding));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->reflection_pad1d_backward(grad_output_, self_, padding));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::reflection_pad2d(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("reflection_pad2d");
- auto ret = Type::reflection_pad2d(self, padding);
+ auto output = Type::reflection_pad2d(self, padding);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::reflection_pad2d_forward(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("reflection_pad2d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ReflectionPad2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ReflectionPad2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->reflection_pad2d_forward(self_, padding));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->reflection_pad2d_forward(self_, padding));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("reflection_pad2d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<ReflectionPad2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<ReflectionPad2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->padding = padding;
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->reflection_pad2d_backward(grad_output_, self_, padding));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->reflection_pad2d_backward(grad_output_, self_, padding));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::replication_pad1d(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("replication_pad1d");
- auto ret = Type::replication_pad1d(self, padding);
+ auto output = Type::replication_pad1d(self, padding);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "replication_pad1d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad1d", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::replication_pad1d_forward(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("replication_pad1d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ReplicationPad1DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ReplicationPad1DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->replication_pad1d_forward(self_, padding));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->replication_pad1d_forward(self_, padding));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("replication_pad1d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<ReplicationPad1DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<ReplicationPad1DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->padding = padding;
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->replication_pad1d_backward(grad_output_, self_, padding));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->replication_pad1d_backward(grad_output_, self_, padding));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::replication_pad2d(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("replication_pad2d");
- auto ret = Type::replication_pad2d(self, padding);
+ auto output = Type::replication_pad2d(self, padding);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "replication_pad2d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad2d", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::replication_pad2d_forward(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("replication_pad2d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ReplicationPad2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ReplicationPad2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->replication_pad2d_forward(self_, padding));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->replication_pad2d_forward(self_, padding));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("replication_pad2d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<ReplicationPad2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<ReplicationPad2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->padding = padding;
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->replication_pad2d_backward(grad_output_, self_, padding));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->replication_pad2d_backward(grad_output_, self_, padding));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::replication_pad3d(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("replication_pad3d");
- auto ret = Type::replication_pad3d(self, padding);
+ auto output = Type::replication_pad3d(self, padding);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "replication_pad3d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad3d", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::replication_pad3d_forward(const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("replication_pad3d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ReplicationPad3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ReplicationPad3DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->replication_pad3d_forward(self_, padding));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->replication_pad3d_forward(self_, padding));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const {
profiler::RecordFunction profiler("replication_pad3d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<ReplicationPad3DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<ReplicationPad3DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->padding = padding;
grad_fn->self_info = self;
}
- auto ret = as_variable(baseType->replication_pad3d_backward(grad_output_, self_, padding));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->replication_pad3d_backward(grad_output_, self_, padding));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::upsample_linear1d(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("upsample_linear1d");
- auto ret = Type::upsample_linear1d(self, output_size);
+ auto output = Type::upsample_linear1d(self, output_size);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_linear1d_forward(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("upsample_linear1d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UpsampleLinear1DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UpsampleLinear1DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->output_size = output_size;
}
- auto ret = as_variable(baseType->upsample_linear1d_forward(self_, output_size));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->upsample_linear1d_forward(self_, output_size));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_linear1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const {
profiler::RecordFunction profiler("upsample_linear1d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
std::shared_ptr<UpsampleLinear1DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output })) {
grad_fn = std::make_shared<UpsampleLinear1DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output });
grad_fn->output_size = output_size;
}
- auto ret = as_variable(baseType->upsample_linear1d_backward(grad_output_, output_size, input_size));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->upsample_linear1d_backward(grad_output_, output_size, input_size));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_backward", { grad_output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_backward", { grad_output }, { grad_input } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
setattr(n, jit::stringToSymbol("input_size"), input_size);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::upsample_bilinear2d(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("upsample_bilinear2d");
- auto ret = Type::upsample_bilinear2d(self, output_size);
+ auto output = Type::upsample_bilinear2d(self, output_size);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_bilinear2d_forward(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("upsample_bilinear2d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UpsampleBilinear2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UpsampleBilinear2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->output_size = output_size;
}
- auto ret = as_variable(baseType->upsample_bilinear2d_forward(self_, output_size));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->upsample_bilinear2d_forward(self_, output_size));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_bilinear2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const {
profiler::RecordFunction profiler("upsample_bilinear2d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
std::shared_ptr<UpsampleBilinear2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output })) {
grad_fn = std::make_shared<UpsampleBilinear2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output });
grad_fn->output_size = output_size;
}
- auto ret = as_variable(baseType->upsample_bilinear2d_backward(grad_output_, output_size, input_size));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->upsample_bilinear2d_backward(grad_output_, output_size, input_size));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_backward", { grad_output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_backward", { grad_output }, { grad_input } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
setattr(n, jit::stringToSymbol("input_size"), input_size);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::upsample_trilinear3d(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("upsample_trilinear3d");
- auto ret = Type::upsample_trilinear3d(self, output_size);
+ auto output = Type::upsample_trilinear3d(self, output_size);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_trilinear3d_forward(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("upsample_trilinear3d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UpsampleTrilinear3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UpsampleTrilinear3DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->output_size = output_size;
}
- auto ret = as_variable(baseType->upsample_trilinear3d_forward(self_, output_size));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->upsample_trilinear3d_forward(self_, output_size));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_trilinear3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const {
profiler::RecordFunction profiler("upsample_trilinear3d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
std::shared_ptr<UpsampleTrilinear3DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output })) {
grad_fn = std::make_shared<UpsampleTrilinear3DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output });
grad_fn->output_size = output_size;
}
- auto ret = as_variable(baseType->upsample_trilinear3d_backward(grad_output_, output_size, input_size));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->upsample_trilinear3d_backward(grad_output_, output_size, input_size));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_backward", { grad_output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_backward", { grad_output }, { grad_input } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
setattr(n, jit::stringToSymbol("input_size"), input_size);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::upsample_nearest1d(const Tensor & self, int64_t scale_factor) const {
profiler::RecordFunction profiler("upsample_nearest1d");
- auto ret = Type::upsample_nearest1d(self, scale_factor);
+ auto output = Type::upsample_nearest1d(self, scale_factor);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d", { self }, { output } );
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_nearest1d_forward(const Tensor & self, int64_t scale_factor) const {
profiler::RecordFunction profiler("upsample_nearest1d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UpsampleNearest1DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UpsampleNearest1DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->scale_factor = scale_factor;
}
- auto ret = as_variable(baseType->upsample_nearest1d_forward(self_, scale_factor));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->upsample_nearest1d_forward(self_, scale_factor));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_nearest1d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const {
profiler::RecordFunction profiler("upsample_nearest1d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<UpsampleNearest1DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<UpsampleNearest1DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->scale_factor = scale_factor;
}
- auto ret = as_variable(baseType->upsample_nearest1d_backward(grad_output_, self_, scale_factor));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->upsample_nearest1d_backward(grad_output_, self_, scale_factor));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::upsample_nearest2d(const Tensor & self, int64_t scale_factor) const {
profiler::RecordFunction profiler("upsample_nearest2d");
- auto ret = Type::upsample_nearest2d(self, scale_factor);
+ auto output = Type::upsample_nearest2d(self, scale_factor);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d", { self }, { output } );
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_nearest2d_forward(const Tensor & self, int64_t scale_factor) const {
profiler::RecordFunction profiler("upsample_nearest2d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UpsampleNearest2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UpsampleNearest2DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->scale_factor = scale_factor;
}
- auto ret = as_variable(baseType->upsample_nearest2d_forward(self_, scale_factor));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->upsample_nearest2d_forward(self_, scale_factor));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_nearest2d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const {
profiler::RecordFunction profiler("upsample_nearest2d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<UpsampleNearest2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<UpsampleNearest2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->scale_factor = scale_factor;
}
- auto ret = as_variable(baseType->upsample_nearest2d_backward(grad_output_, self_, scale_factor));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->upsample_nearest2d_backward(grad_output_, self_, scale_factor));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::upsample_nearest3d(const Tensor & self, int64_t scale_factor) const {
profiler::RecordFunction profiler("upsample_nearest3d");
- auto ret = Type::upsample_nearest3d(self, scale_factor);
+ auto output = Type::upsample_nearest3d(self, scale_factor);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d", { self }, { output } );
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_nearest3d_forward(const Tensor & self, int64_t scale_factor) const {
profiler::RecordFunction profiler("upsample_nearest3d_forward");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UpsampleNearest3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UpsampleNearest3DBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->scale_factor = scale_factor;
}
- auto ret = as_variable(baseType->upsample_nearest3d_forward(self_, scale_factor));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->upsample_nearest3d_forward(self_, scale_factor));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_forward", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_forward", { self }, { output } );
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::upsample_nearest3d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const {
profiler::RecordFunction profiler("upsample_nearest3d_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<UpsampleNearest3DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self })) {
grad_fn = std::make_shared<UpsampleNearest3DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self });
grad_fn->scale_factor = scale_factor;
}
- auto ret = as_variable(baseType->upsample_nearest3d_backward(grad_output_, self_, scale_factor));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->upsample_nearest3d_backward(grad_output_, self_, scale_factor));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_backward", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_backward", { grad_output, self }, { grad_input } );
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor);
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::_sigmoid(const Tensor & self) const {
profiler::RecordFunction profiler("_sigmoid");
- auto ret = Type::_sigmoid(self);
+ auto output = Type::_sigmoid(self);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "_sigmoid", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "_sigmoid", { self }, { output } );
(void)n;
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::_sigmoid_forward(const Tensor & self) const {
- throw std::runtime_error("VariableType::_sigmoid_forward NYI");
+ profiler::RecordFunction profiler("_sigmoid_forward");
+ auto& self_ = unpack(self, "self", 0);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for _sigmoid_forward is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
+ auto output = as_variable(baseType->_sigmoid_forward(self_));
+ set_history(output, grad_fn);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "_sigmoid_forward", { self }, { output } );
+ (void)n;
+ }
+ return output;
}
Tensor VariableType::_sigmoid_backward(const Tensor & grad_output, const Tensor & output) const {
profiler::RecordFunction profiler("_sigmoid_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& output_ = unpack(output, "output", 1);
std::shared_ptr<SigmoidBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, output });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, output })) {
grad_fn = std::make_shared<SigmoidBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, output });
grad_fn->output_ = SavedVariable(output, false);
grad_fn->grad_output_ = SavedVariable(grad_output, false);
}
- auto ret = as_variable(baseType->_sigmoid_backward(grad_output_, output_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->_sigmoid_backward(grad_output_, output_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, output )) {
- jit::Node *n = jit::tracer::recordTrace( "_sigmoid_backward", { grad_output, output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "_sigmoid_backward", { grad_output, output }, { grad_input } );
(void)n;
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::_tanh(const Tensor & self) const {
profiler::RecordFunction profiler("_tanh");
- auto ret = Type::_tanh(self);
+ auto output = Type::_tanh(self);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "_tanh", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "_tanh", { self }, { output } );
(void)n;
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::_tanh_forward(const Tensor & self) const {
- throw std::runtime_error("VariableType::_tanh_forward NYI");
+ profiler::RecordFunction profiler("_tanh_forward");
+ auto& self_ = unpack(self, "self", 0);
+ std::shared_ptr<Error> grad_fn;
+ if (compute_requires_grad({ self })) {
+ grad_fn = std::make_shared<Error>("the derivative for _tanh_forward is not implemented");
+ grad_fn->next_functions = compute_next_functions({ self });
+
+ }
+ auto output = as_variable(baseType->_tanh_forward(self_));
+ set_history(output, grad_fn);
+ if (jit::tracer::isTracing( self )) {
+ jit::Node *n = jit::tracer::recordTrace( "_tanh_forward", { self }, { output } );
+ (void)n;
+ }
+ return output;
}
Tensor VariableType::_tanh_backward(const Tensor & grad_output, const Tensor & output) const {
profiler::RecordFunction profiler("_tanh_backward");
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& output_ = unpack(output, "output", 1);
std::shared_ptr<TanhBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, output });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, output })) {
grad_fn = std::make_shared<TanhBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, output });
grad_fn->output_ = SavedVariable(output, false);
grad_fn->grad_output_ = SavedVariable(grad_output, false);
}
- auto ret = as_variable(baseType->_tanh_backward(grad_output_, output_));
- set_history(ret, grad_fn);
+ auto grad_input = as_variable(baseType->_tanh_backward(grad_output_, output_));
+ set_history(grad_input, grad_fn);
if (jit::tracer::isTracing( grad_output, output )) {
- jit::Node *n = jit::tracer::recordTrace( "_tanh_backward", { grad_output, output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "_tanh_backward", { grad_output, output }, { grad_input } );
(void)n;
}
- return Tensor(std::move(ret));
+ return grad_input;
}
Tensor VariableType::thnn_batch_norm(const Tensor & self, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) const {
profiler::RecordFunction profiler("thnn_batch_norm");
- auto ret = Type::thnn_batch_norm(self, weight, bias, running_mean, running_var, training, momentum, eps);
+ auto output = Type::thnn_batch_norm(self, weight, bias, running_mean, running_var, training, momentum, eps);
if (jit::tracer::isTracing( self, weight, bias, running_mean, running_var )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm", { self, weight, bias, running_mean, running_var }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm", { self, weight, bias, running_mean, running_var }, { output } );
setattr(n, jit::stringToSymbol("training"), training);
setattr(n, jit::stringToSymbol("momentum"), momentum);
setattr(n, jit::stringToSymbol("eps"), eps);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_batch_norm_forward(const Tensor & self, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) const {
profiler::RecordFunction profiler("thnn_batch_norm_forward");
@@ -8199,8 +8216,7 @@
check_no_requires_grad(running_mean, "running_mean");
check_no_requires_grad(running_var, "running_var");
std::shared_ptr<ThnnBatchNormBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<ThnnBatchNormBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -8210,21 +8226,20 @@
grad_fn->training = training;
grad_fn->eps = eps;
}
- auto ret = as_variable(baseType->thnn_batch_norm_forward(self_, weight_, bias_, running_mean_, running_var_, training, momentum, eps));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, save_mean, save_std;
+ std::tie(output, save_mean, save_std) = as_variable(baseType->thnn_batch_norm_forward(self_, weight_, bias_, running_mean_, running_var_, training, momentum, eps));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, weight, bias, running_mean, running_var )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_forward", { self, weight, bias, running_mean, running_var }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_forward", { self, weight, bias, running_mean, running_var }, { output, save_mean, save_std } );
setattr(n, jit::stringToSymbol("training"), training);
setattr(n, jit::stringToSymbol("momentum"), momentum);
setattr(n, jit::stringToSymbol("eps"), eps);
}
if (grad_fn) {
- auto& save_mean = std::get<1>(ret);
grad_fn->save_mean_ = SavedVariable(save_mean, true);
- auto& save_std = std::get<2>(ret);
grad_fn->save_std_ = SavedVariable(save_std, true);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(save_mean), std::move(save_std));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_batch_norm_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, bool training, double eps, const Tensor & save_mean, const Tensor & save_std, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("thnn_batch_norm_backward");
@@ -8238,8 +8253,7 @@
check_no_requires_grad(running_mean, "running_mean");
check_no_requires_grad(running_var, "running_var");
std::shared_ptr<ThnnBatchNormBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, weight, save_mean, save_std });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, weight, save_mean, save_std })) {
grad_fn = std::make_shared<ThnnBatchNormBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, save_mean, save_std });
grad_fn->save_mean_ = SavedVariable(save_mean, false);
@@ -8252,28 +8266,29 @@
grad_fn->training = training;
grad_fn->eps = eps;
}
- auto ret = as_variable(baseType->thnn_batch_norm_backward(grad_output_, self_, weight_, running_mean_, running_var_, training, eps, save_mean_, save_std_, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor grad_input, grad_weight, grad_bias;
+ std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_batch_norm_backward(grad_output_, self_, weight_, running_mean_, running_var_, training, eps, save_mean_, save_std_, output_mask));
+ set_history({ grad_input, grad_weight, grad_bias }, grad_fn);
if (jit::tracer::isTracing( grad_output, self, weight, running_mean, running_var, save_mean, save_std )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_backward", { grad_output, self, weight, running_mean, running_var, save_mean, save_std }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_backward", { grad_output, self, weight, running_mean, running_var, save_mean, save_std }, { grad_input, grad_weight, grad_bias } );
setattr(n, jit::stringToSymbol("training"), training);
setattr(n, jit::stringToSymbol("eps"), eps);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias));
}
Tensor VariableType::thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_transpose2d");
- auto ret = Type::thnn_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+ auto output = Type::thnn_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d", { self, weight, bias }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("output_padding"), output_padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_transpose2d_forward");
@@ -8281,8 +8296,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 3);
std::shared_ptr<ThnnConvTranspose2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<ThnnConvTranspose2DBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -8293,10 +8307,11 @@
grad_fn->output_padding = output_padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_transpose2d_forward(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, columns, ones;
+ std::tie(output, columns, ones) = as_variable(baseType->thnn_conv_transpose2d_forward(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_forward", { self, weight, bias }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_forward", { self, weight, bias }, { output, columns, ones } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
@@ -8304,12 +8319,10 @@
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
if (grad_fn) {
- auto& columns = std::get<1>(ret);
grad_fn->columns_ = SavedVariable(columns, true);
- auto& ones = std::get<2>(ret);
grad_fn->ones_ = SavedVariable(ones, true);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(columns), std::move(ones));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("thnn_conv_transpose2d_backward");
@@ -8321,8 +8334,7 @@
check_no_requires_grad(columns, "columns");
check_no_requires_grad(ones, "ones");
std::shared_ptr<ThnnConvTranspose2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, weight })) {
grad_fn = std::make_shared<ThnnConvTranspose2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
@@ -8333,10 +8345,11 @@
grad_fn->output_padding = output_padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_transpose2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, output_padding, dilation, columns_, ones_, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor grad_input, grad_weight, grad_bias;
+ std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv_transpose2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, output_padding, dilation, columns_, ones_, output_mask));
+ set_history({ grad_input, grad_weight, grad_bias }, grad_fn);
if (jit::tracer::isTracing( grad_output, self, weight, columns, ones )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_backward", { grad_output, self, weight, columns, ones }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_backward", { grad_output, self, weight, columns, ones }, { grad_input, grad_weight, grad_bias } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
@@ -8344,19 +8357,19 @@
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias));
}
Tensor VariableType::thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_transpose3d");
- auto ret = Type::thnn_conv_transpose3d(self, weight, bias, stride, padding, output_padding, dilation);
+ auto output = Type::thnn_conv_transpose3d(self, weight, bias, stride, padding, output_padding, dilation);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d", { self, weight, bias }, { output } );
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("output_padding"), output_padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_transpose3d_forward");
@@ -8364,8 +8377,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 2);
std::shared_ptr<ThnnConvTranspose3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<ThnnConvTranspose3DBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -8375,22 +8387,21 @@
grad_fn->output_padding = output_padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_transpose3d_forward(self_, weight_, bias_, stride, padding, output_padding, dilation));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, finput, fgrad_input;
+ std::tie(output, finput, fgrad_input) = as_variable(baseType->thnn_conv_transpose3d_forward(self_, weight_, bias_, stride, padding, output_padding, dilation));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_forward", { self, weight, bias }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_forward", { self, weight, bias }, { output, finput, fgrad_input } );
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("output_padding"), output_padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
if (grad_fn) {
- auto& finput = std::get<1>(ret);
grad_fn->finput_ = SavedVariable(finput, true);
- auto& fgrad_input = std::get<2>(ret);
grad_fn->fgrad_input_ = SavedVariable(fgrad_input, true);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(finput), std::move(fgrad_input));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("thnn_conv_transpose3d_backward");
@@ -8402,8 +8413,7 @@
check_no_requires_grad(finput, "finput");
check_no_requires_grad(fgrad_input, "fgrad_input");
std::shared_ptr<ThnnConvTranspose3DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, weight })) {
grad_fn = std::make_shared<ThnnConvTranspose3DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
@@ -8414,28 +8424,29 @@
grad_fn->output_padding = output_padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_transpose3d_backward(grad_output_, self_, weight_, stride, padding, output_padding, dilation, finput_, fgrad_input_, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor grad_input, grad_weight, grad_bias;
+ std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv_transpose3d_backward(grad_output_, self_, weight_, stride, padding, output_padding, dilation, finput_, fgrad_input_, output_mask));
+ set_history({ grad_input, grad_weight, grad_bias }, grad_fn);
if (jit::tracer::isTracing( grad_output, self, weight, finput, fgrad_input )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_backward", { grad_output, self, weight, finput, fgrad_input }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_backward", { grad_output, self, weight, finput, fgrad_input }, { grad_input, grad_weight, grad_bias } );
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("output_padding"), output_padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias));
}
Tensor VariableType::thnn_conv2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const {
profiler::RecordFunction profiler("thnn_conv2d");
- auto ret = Type::thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
+ auto output = Type::thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d", { self, weight, bias }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const {
profiler::RecordFunction profiler("thnn_conv2d_forward");
@@ -8443,8 +8454,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 3);
std::shared_ptr<ThnnConv2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<ThnnConv2DBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -8453,21 +8463,20 @@
grad_fn->stride = stride;
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->thnn_conv2d_forward(self_, weight_, kernel_size, bias_, stride, padding));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, finput, fgrad_input;
+ std::tie(output, finput, fgrad_input) = as_variable(baseType->thnn_conv2d_forward(self_, weight_, kernel_size, bias_, stride, padding));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_forward", { self, weight, bias }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_forward", { self, weight, bias }, { output, finput, fgrad_input } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
}
if (grad_fn) {
- auto& finput = std::get<1>(ret);
grad_fn->finput_ = SavedVariable(finput, true);
- auto& fgrad_input = std::get<2>(ret);
grad_fn->fgrad_input_ = SavedVariable(fgrad_input, true);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(finput), std::move(fgrad_input));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("thnn_conv2d_backward");
@@ -8479,8 +8488,7 @@
check_no_requires_grad(finput, "finput");
check_no_requires_grad(fgrad_input, "fgrad_input");
std::shared_ptr<ThnnConv2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, weight })) {
grad_fn = std::make_shared<ThnnConv2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
@@ -8489,28 +8497,29 @@
grad_fn->stride = stride;
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->thnn_conv2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor grad_input, grad_weight, grad_bias;
+ std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_, output_mask));
+ set_history({ grad_input, grad_weight, grad_bias }, grad_fn);
if (jit::tracer::isTracing( grad_output, self, weight, finput, fgrad_input )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_backward", { grad_output, self, weight, finput, fgrad_input }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_backward", { grad_output, self, weight, finput, fgrad_input }, { grad_input, grad_weight, grad_bias } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias));
}
Tensor VariableType::thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_depthwise2d");
- auto ret = Type::thnn_conv_depthwise2d(self, weight, kernel_size, bias, stride, padding, dilation);
+ auto output = Type::thnn_conv_depthwise2d(self, weight, kernel_size, bias, stride, padding, dilation);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d", { self, weight, bias }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
- return Tensor(std::move(ret));
+ return output;
}
Tensor VariableType::thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_depthwise2d_forward");
@@ -8518,8 +8527,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 3);
std::shared_ptr<ThnnConvDepthwise2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<ThnnConvDepthwise2DBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -8529,16 +8537,16 @@
grad_fn->padding = padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_depthwise2d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->thnn_conv_depthwise2d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_forward", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_forward", { self, weight, bias }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor> VariableType::thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, std::array<bool,2> output_mask) const {
profiler::RecordFunction profiler("thnn_conv_depthwise2d_backward");
@@ -8546,8 +8554,7 @@
auto& self_ = unpack(self, "self", 1);
auto& weight_ = unpack(weight, "weight", 2);
std::shared_ptr<ThnnConvDepthwise2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, weight })) {
grad_fn = std::make_shared<ThnnConvDepthwise2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
@@ -8557,28 +8564,29 @@
grad_fn->padding = padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_depthwise2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn);
+ Tensor grad_input, grad_weight;
+ std::tie(grad_input, grad_weight) = as_variable(baseType->thnn_conv_depthwise2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, output_mask));
+ set_history({ grad_input, grad_weight }, grad_fn);
if (jit::tracer::isTracing( grad_output, self, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_backward", { grad_output, self, weight }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_backward", { grad_output, self, weight }, { grad_input, grad_weight } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_input), std::move(grad_weight));
}
Tensor VariableType::thnn_conv3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const {
profiler::RecordFunction profiler("thnn_conv3d");
- auto ret = Type::thnn_conv3d(self, weight, kernel_size, bias, stride, padding);
+ auto output = Type::thnn_conv3d(self, weight, kernel_size, bias, stride, padding);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d", { self, weight, bias }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const {
profiler::RecordFunction profiler("thnn_conv3d_forward");
@@ -8586,8 +8594,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 3);
std::shared_ptr<ThnnConv3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<ThnnConv3DBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -8596,21 +8603,20 @@
grad_fn->stride = stride;
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->thnn_conv3d_forward(self_, weight_, kernel_size, bias_, stride, padding));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, finput, fgrad_input;
+ std::tie(output, finput, fgrad_input) = as_variable(baseType->thnn_conv3d_forward(self_, weight_, kernel_size, bias_, stride, padding));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_forward", { self, weight, bias }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_forward", { self, weight, bias }, { output, finput, fgrad_input } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
}
if (grad_fn) {
- auto& finput = std::get<1>(ret);
grad_fn->finput_ = SavedVariable(finput, true);
- auto& fgrad_input = std::get<2>(ret);
grad_fn->fgrad_input_ = SavedVariable(fgrad_input, true);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(finput), std::move(fgrad_input));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("thnn_conv3d_backward");
@@ -8622,8 +8628,7 @@
check_no_requires_grad(finput, "finput");
check_no_requires_grad(fgrad_input, "fgrad_input");
std::shared_ptr<ThnnConv3DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, weight })) {
grad_fn = std::make_shared<ThnnConv3DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
@@ -8632,28 +8637,29 @@
grad_fn->stride = stride;
grad_fn->padding = padding;
}
- auto ret = as_variable(baseType->thnn_conv3d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor grad_input, grad_weight, grad_bias;
+ std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv3d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_, output_mask));
+ set_history({ grad_input, grad_weight, grad_bias }, grad_fn);
if (jit::tracer::isTracing( grad_output, self, weight, finput, fgrad_input )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_backward", { grad_output, self, weight, finput, fgrad_input }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_backward", { grad_output, self, weight, finput, fgrad_input }, { grad_input, grad_weight, grad_bias } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias));
}
Tensor VariableType::thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_dilated2d");
- auto ret = Type::thnn_conv_dilated2d(self, weight, kernel_size, bias, stride, padding, dilation);
+ auto output = Type::thnn_conv_dilated2d(self, weight, kernel_size, bias, stride, padding, dilation);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d", { self, weight, bias }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_dilated2d_forward");
@@ -8661,8 +8667,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 3);
std::shared_ptr<ThnnConvDilated2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<ThnnConvDilated2DBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -8672,22 +8677,21 @@
grad_fn->padding = padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_dilated2d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, columns, ones;
+ std::tie(output, columns, ones) = as_variable(baseType->thnn_conv_dilated2d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_forward", { self, weight, bias }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_forward", { self, weight, bias }, { output, columns, ones } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
if (grad_fn) {
- auto& columns = std::get<1>(ret);
grad_fn->columns_ = SavedVariable(columns, true);
- auto& ones = std::get<2>(ret);
grad_fn->ones_ = SavedVariable(ones, true);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(columns), std::move(ones));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("thnn_conv_dilated2d_backward");
@@ -8699,8 +8703,7 @@
check_no_requires_grad(columns, "columns");
check_no_requires_grad(ones, "ones");
std::shared_ptr<ThnnConvDilated2DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, weight })) {
grad_fn = std::make_shared<ThnnConvDilated2DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
@@ -8710,29 +8713,30 @@
grad_fn->padding = padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_dilated2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor grad_input, grad_weight, grad_bias;
+ std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv_dilated2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_, output_mask));
+ set_history({ grad_input, grad_weight, grad_bias }, grad_fn);
if (jit::tracer::isTracing( grad_output, self, weight, columns, ones )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_backward", { grad_output, self, weight, columns, ones }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_backward", { grad_output, self, weight, columns, ones }, { grad_input, grad_weight, grad_bias } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias));
}
Tensor VariableType::thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_dilated3d");
- auto ret = Type::thnn_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation);
+ auto output = Type::thnn_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d", { self, weight, bias }, { output } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const {
profiler::RecordFunction profiler("thnn_conv_dilated3d_forward");
@@ -8740,8 +8744,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 3);
std::shared_ptr<ThnnConvDilated3DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<ThnnConvDilated3DBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -8751,22 +8754,21 @@
grad_fn->padding = padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_dilated3d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor output, columns, ones;
+ std::tie(output, columns, ones) = as_variable(baseType->thnn_conv_dilated3d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_forward", { self, weight, bias }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_forward", { self, weight, bias }, { output, columns, ones } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
}
if (grad_fn) {
- auto& columns = std::get<1>(ret);
grad_fn->columns_ = SavedVariable(columns, true);
- auto& ones = std::get<2>(ret);
grad_fn->ones_ = SavedVariable(ones, true);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(output), std::move(columns), std::move(ones));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("thnn_conv_dilated3d_backward");
@@ -8778,8 +8780,7 @@
check_no_requires_grad(columns, "columns");
check_no_requires_grad(ones, "ones");
std::shared_ptr<ThnnConvDilated3DBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ grad_output, self, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ grad_output, self, weight })) {
grad_fn = std::make_shared<ThnnConvDilated3DBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight });
grad_fn->grad_output_ = SavedVariable(grad_output, false);
@@ -8789,83 +8790,85 @@
grad_fn->padding = padding;
grad_fn->dilation = dilation;
}
- auto ret = as_variable(baseType->thnn_conv_dilated3d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor grad_input, grad_weight, grad_bias;
+ std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv_dilated3d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_, output_mask));
+ set_history({ grad_input, grad_weight, grad_bias }, grad_fn);
if (jit::tracer::isTracing( grad_output, self, weight, columns, ones )) {
- jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_backward", { grad_output, self, weight, columns, ones }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_backward", { grad_output, self, weight, columns, ones }, { grad_input, grad_weight, grad_bias } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias));
}
Tensor VariableType::adaptive_avg_pool1d(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_avg_pool1d");
- auto ret = Type::adaptive_avg_pool1d(self, output_size);
+ auto result = Type::adaptive_avg_pool1d(self, output_size);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool1d", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool1d", { self }, { result } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool1d(const Tensor & self, IntList output_size) const {
profiler::RecordFunction profiler("adaptive_max_pool1d");
- auto ret = Type::adaptive_max_pool1d(self, output_size);
+ Tensor result0, result1;
+ std::tie(result0, result1) = Type::adaptive_max_pool1d(self, output_size);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool1d", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool1d", { self }, { result0, result1 } );
setattr(n, jit::stringToSymbol("output_size"), output_size);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1));
}
bool VariableType::allclose(const Tensor & self, const Tensor & other, double rtol, double atol) const {
- auto& self_ = unpack(self, "self", 0);
- auto& other_ = unpack(other, "other", 1);
- return baseType->allclose(self_, other_, rtol, atol);
+ profiler::RecordFunction profiler("allclose");
+ auto result = Type::allclose(self, other, rtol, atol);
+ return result;
}
Tensor VariableType::batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) const {
profiler::RecordFunction profiler("batch_norm");
- auto ret = Type::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
+ auto result = Type::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
if (jit::tracer::isTracing( input, weight, bias, running_mean, running_var )) {
- jit::Node *n = jit::tracer::recordTrace( "batch_norm", { input, weight, bias, running_mean, running_var }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "batch_norm", { input, weight, bias, running_mean, running_var }, { result } );
setattr(n, jit::stringToSymbol("training"), training);
setattr(n, jit::stringToSymbol("momentum"), momentum);
setattr(n, jit::stringToSymbol("eps"), eps);
setattr(n, jit::stringToSymbol("cudnn_enabled"), cudnn_enabled);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::bernoulli_(Tensor & self, const Tensor & p, Generator * generator) const {
profiler::RecordFunction profiler("bernoulli_");
- auto ret = Type::bernoulli_(self, p, generator);
+ Type::bernoulli_(self, p, generator);
return self;
}
Tensor & VariableType::bernoulli_(Tensor & self, double p, Generator * generator) const {
profiler::RecordFunction profiler("bernoulli_");
- auto ret = Type::bernoulli_(self, p, generator);
+ Type::bernoulli_(self, p, generator);
return self;
}
std::vector<Tensor> VariableType::chunk(const Tensor & self, int64_t chunks, int64_t dim) const {
profiler::RecordFunction profiler("chunk");
- auto ret = Type::chunk(self, chunks, dim);
+ auto result = Type::chunk(self, chunks, dim);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "chunk", { self }, cast_tensor_list(ret) );
+ jit::Node *n = jit::tracer::recordTrace( "chunk", { self }, flatten(result) );
setattr(n, jit::stringToSymbol("chunks"), chunks);
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return ret;
+ return result;
}
Tensor VariableType::convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups) const {
profiler::RecordFunction profiler("convolution");
- auto ret = Type::convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
- return Tensor(std::move(ret));
+ auto result = Type::convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
+ return result;
}
Tensor VariableType::_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) const {
profiler::RecordFunction profiler("_convolution");
- auto ret = Type::_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
+ auto result = Type::_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
if (jit::tracer::isTracing( input, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "_convolution", { input, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "_convolution", { input, weight, bias }, { result } );
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
@@ -8876,39 +8879,27 @@
setattr(n, jit::stringToSymbol("deterministic"), deterministic);
setattr(n, jit::stringToSymbol("cudnn_enabled"), cudnn_enabled);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::_convolution_nogroup(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding) const {
profiler::RecordFunction profiler("_convolution_nogroup");
- auto ret = Type::_convolution_nogroup(input, weight, bias, stride, padding, dilation, transposed, output_padding);
+ auto result = Type::_convolution_nogroup(input, weight, bias, stride, padding, dilation, transposed, output_padding);
if (jit::tracer::isTracing( input, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "_convolution_nogroup", { input, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "_convolution_nogroup", { input, weight, bias }, { result } );
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("transposed"), transposed);
setattr(n, jit::stringToSymbol("output_padding"), output_padding);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::_convolution_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("_convolution_double_backward");
- auto ggI_ = unpack_opt(ggI, "ggI", 0);
- auto ggW_ = unpack_opt(ggW, "ggW", 1);
- auto ggb_ = unpack_opt(ggb, "ggb", 2);
- auto& gO_ = unpack(gO, "gO", 3);
- auto& weight_ = unpack(weight, "weight", 4);
- auto& self_ = unpack(self, "self", 5);
- std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ ggI, ggW, ggb, gO, weight, self });
- if (requires_grad) {
- grad_fn = std::make_shared<Error>("the derivative for _convolution_double_backward is not implemented");
- grad_fn->next_functions = compute_next_functions({ ggI, ggW, ggb, gO, weight, self });
- }
- auto ret = as_variable(baseType->_convolution_double_backward(ggI_, ggW_, ggb_, gO_, weight_, self_, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor result0, result1, result2;
+ std::tie(result0, result1, result2) = Type::_convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, output_mask);
if (jit::tracer::isTracing( ggI, ggW, ggb, gO, weight, self )) {
- jit::Node *n = jit::tracer::recordTrace( "_convolution_double_backward", { ggI, ggW, ggb, gO, weight, self }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "_convolution_double_backward", { ggI, ggW, ggb, gO, weight, self }, { result0, result1, result2 } );
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
@@ -8920,22 +8911,22 @@
setattr(n, jit::stringToSymbol("cudnn_enabled"), cudnn_enabled);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
}
Tensor VariableType::conv1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) const {
profiler::RecordFunction profiler("conv1d");
- auto ret = Type::conv1d(input, weight, bias, stride, padding, dilation, groups);
- return Tensor(std::move(ret));
+ auto result = Type::conv1d(input, weight, bias, stride, padding, dilation, groups);
+ return result;
}
Tensor VariableType::conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) const {
profiler::RecordFunction profiler("conv2d");
- auto ret = Type::conv2d(input, weight, bias, stride, padding, dilation, groups);
- return Tensor(std::move(ret));
+ auto result = Type::conv2d(input, weight, bias, stride, padding, dilation, groups);
+ return result;
}
Tensor VariableType::conv3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) const {
profiler::RecordFunction profiler("conv3d");
- auto ret = Type::conv3d(input, weight, bias, stride, padding, dilation, groups);
- return Tensor(std::move(ret));
+ auto result = Type::conv3d(input, weight, bias, stride, padding, dilation, groups);
+ return result;
}
Tensor VariableType::conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad) const {
profiler::RecordFunction profiler("conv_tbc");
@@ -8943,8 +8934,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto& bias_ = unpack(bias, "bias", 2);
std::shared_ptr<ConvTbcBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<ConvTbcBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -8952,55 +8942,44 @@
grad_fn->bias_ = SavedVariable(bias, false);
grad_fn->pad = pad;
}
- auto ret = as_variable(baseType->conv_tbc(self_, weight_, bias_, pad));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->conv_tbc(self_, weight_, bias_, pad));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "conv_tbc", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "conv_tbc", { self, weight, bias }, { result } );
setattr(n, jit::stringToSymbol("pad"), pad);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad) const {
profiler::RecordFunction profiler("conv_tbc_backward");
- auto& self_ = unpack(self, "self", 0);
- auto& input_ = unpack(input, "input", 1);
- auto& weight_ = unpack(weight, "weight", 2);
- auto& bias_ = unpack(bias, "bias", 3);
- std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ self, input, weight, bias });
- if (requires_grad) {
- grad_fn = std::make_shared<Error>("the derivative for conv_tbc_backward is not implemented");
- grad_fn->next_functions = compute_next_functions({ self, input, weight, bias });
- }
- auto ret = as_variable(baseType->conv_tbc_backward(self_, input_, weight_, bias_, pad));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor result0, result1, result2;
+ std::tie(result0, result1, result2) = Type::conv_tbc_backward(self, input, weight, bias, pad);
if (jit::tracer::isTracing( self, input, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "conv_tbc_backward", { self, input, weight, bias }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "conv_tbc_backward", { self, input, weight, bias }, { result0, result1, result2 } );
setattr(n, jit::stringToSymbol("pad"), pad);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
}
Tensor VariableType::conv_transpose1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) const {
profiler::RecordFunction profiler("conv_transpose1d");
- auto ret = Type::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation);
- return Tensor(std::move(ret));
+ auto result = Type::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation);
+ return result;
}
Tensor VariableType::conv_transpose2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) const {
profiler::RecordFunction profiler("conv_transpose2d");
- auto ret = Type::conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation);
- return Tensor(std::move(ret));
+ auto result = Type::conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation);
+ return result;
}
Tensor VariableType::conv_transpose3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) const {
profiler::RecordFunction profiler("conv_transpose3d");
- auto ret = Type::conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation);
- return Tensor(std::move(ret));
+ auto result = Type::conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation);
+ return result;
}
Tensor VariableType::cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) const {
profiler::RecordFunction profiler("cudnn_affine_grid_generator");
auto& theta_ = unpack(theta, "theta", 0);
std::shared_ptr<CudnnAffineGridGeneratorBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ theta });
- if (requires_grad) {
+ if (compute_requires_grad({ theta })) {
grad_fn = std::make_shared<CudnnAffineGridGeneratorBackward>();
grad_fn->next_functions = compute_next_functions({ theta });
grad_fn->N = N;
@@ -9008,36 +8987,28 @@
grad_fn->H = H;
grad_fn->W = W;
}
- auto ret = as_variable(baseType->cudnn_affine_grid_generator(theta_, N, C, H, W));
- set_history(ret, grad_fn);
+ auto grid = as_variable(baseType->cudnn_affine_grid_generator(theta_, N, C, H, W));
+ set_history(grid, grad_fn);
if (jit::tracer::isTracing( theta )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_affine_grid_generator", { theta }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_affine_grid_generator", { theta }, { grid } );
setattr(n, jit::stringToSymbol("N"), N);
setattr(n, jit::stringToSymbol("C"), C);
setattr(n, jit::stringToSymbol("H"), H);
setattr(n, jit::stringToSymbol("W"), W);
}
- return Tensor(std::move(ret));
+ return grid;
}
Tensor VariableType::cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) const {
profiler::RecordFunction profiler("cudnn_affine_grid_generator_backward");
- auto& grad_ = unpack(grad, "grad", 0);
- std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ grad });
- if (requires_grad) {
- grad_fn = std::make_shared<Error>("the derivative for cudnn_affine_grid_generator_backward is not implemented");
- grad_fn->next_functions = compute_next_functions({ grad });
- }
- auto ret = as_variable(baseType->cudnn_affine_grid_generator_backward(grad_, N, C, H, W));
- set_history({ ret }, grad_fn);
+ auto grad_theta = Type::cudnn_affine_grid_generator_backward(grad, N, C, H, W);
if (jit::tracer::isTracing( grad )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_affine_grid_generator_backward", { grad }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_affine_grid_generator_backward", { grad }, { grad_theta } );
setattr(n, jit::stringToSymbol("N"), N);
setattr(n, jit::stringToSymbol("C"), C);
setattr(n, jit::stringToSymbol("H"), H);
setattr(n, jit::stringToSymbol("W"), W);
}
- return Tensor(std::move(ret));
+ return grad_theta;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon) const {
profiler::RecordFunction profiler("cudnn_batch_norm");
@@ -9049,8 +9020,7 @@
check_no_requires_grad(running_mean, "running_mean");
check_no_requires_grad(running_var, "running_var");
std::shared_ptr<CudnnBatchNormBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ input, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ input, weight, bias })) {
grad_fn = std::make_shared<CudnnBatchNormBackward>();
grad_fn->next_functions = compute_next_functions({ input, weight, bias });
grad_fn->input_ = SavedVariable(input, false);
@@ -9060,21 +9030,20 @@
grad_fn->training = training;
grad_fn->epsilon = epsilon;
}
- auto ret = as_variable(baseType->cudnn_batch_norm(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor result0, result1, result2;
+ std::tie(result0, result1, result2) = as_variable(baseType->cudnn_batch_norm(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon));
+ set_history({ result0, result1, result2 }, grad_fn);
if (jit::tracer::isTracing( input, weight, bias, running_mean, running_var )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_batch_norm", { input, weight, bias, running_mean, running_var }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_batch_norm", { input, weight, bias, running_mean, running_var }, { result0, result1, result2 } );
setattr(n, jit::stringToSymbol("training"), training);
setattr(n, jit::stringToSymbol("exponential_average_factor"), exponential_average_factor);
setattr(n, jit::stringToSymbol("epsilon"), epsilon);
}
if (grad_fn) {
- auto& result1 = std::get<1>(ret);
grad_fn->result1_ = SavedVariable(result1, true);
- auto& result2 = std::get<2>(ret);
grad_fn->result2_ = SavedVariable(result2, true);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
}
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon) const {
profiler::RecordFunction profiler("cudnn_batch_norm_backward");
@@ -9088,8 +9057,7 @@
check_no_requires_grad(running_mean, "running_mean");
check_no_requires_grad(running_var, "running_var");
std::shared_ptr<CudnnBatchNormBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ input, grad_output, weight, save_mean, save_var });
- if (requires_grad) {
+ if (compute_requires_grad({ input, grad_output, weight, save_mean, save_var })) {
grad_fn = std::make_shared<CudnnBatchNormBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ input, grad_output, weight, save_mean, save_var });
grad_fn->input_ = SavedVariable(input, false);
@@ -9101,13 +9069,14 @@
grad_fn->save_var_ = SavedVariable(save_var, false);
grad_fn->epsilon = epsilon;
}
- auto ret = as_variable(baseType->cudnn_batch_norm_backward(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor result0, result1, result2;
+ std::tie(result0, result1, result2) = as_variable(baseType->cudnn_batch_norm_backward(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon));
+ set_history({ result0, result1, result2 }, grad_fn);
if (jit::tracer::isTracing( input, grad_output, weight, running_mean, running_var, save_mean, save_var )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_batch_norm_backward", { input, grad_output, weight, running_mean, running_var, save_mean, save_var }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_batch_norm_backward", { input, grad_output, weight, running_mean, running_var, save_mean, save_var }, { result0, result1, result2 } );
setattr(n, jit::stringToSymbol("epsilon"), epsilon);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
}
Tensor VariableType::cudnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
profiler::RecordFunction profiler("cudnn_convolution");
@@ -9115,8 +9084,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 2);
std::shared_ptr<CudnnConvolutionBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<CudnnConvolutionBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -9128,10 +9096,10 @@
grad_fn->benchmark = benchmark;
grad_fn->deterministic = deterministic;
}
- auto ret = as_variable(baseType->cudnn_convolution(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->cudnn_convolution(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution", { self, weight, bias }, { result } );
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("dilation"), dilation);
@@ -9139,13 +9107,13 @@
setattr(n, jit::stringToSymbol("benchmark"), benchmark);
setattr(n, jit::stringToSymbol("deterministic"), deterministic);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::cudnn_convolution_backward_input(IntList self_size, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
profiler::RecordFunction profiler("cudnn_convolution_backward_input");
- auto ret = Type::cudnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic);
+ auto result = Type::cudnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic);
if (jit::tracer::isTracing( grad_output, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_input", { grad_output, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_input", { grad_output, weight }, { result } );
setattr(n, jit::stringToSymbol("self_size"), self_size);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("stride"), stride);
@@ -9154,7 +9122,7 @@
setattr(n, jit::stringToSymbol("benchmark"), benchmark);
setattr(n, jit::stringToSymbol("deterministic"), deterministic);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("cudnn_convolution_backward");
@@ -9162,8 +9130,7 @@
auto& grad_output_ = unpack(grad_output, "grad_output", 1);
auto& weight_ = unpack(weight, "weight", 2);
std::shared_ptr<CudnnConvolutionBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, grad_output, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ self, grad_output, weight })) {
grad_fn = std::make_shared<CudnnConvolutionBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ self, grad_output, weight });
grad_fn->self_ = SavedVariable(self, false);
@@ -9176,10 +9143,11 @@
grad_fn->benchmark = benchmark;
grad_fn->deterministic = deterministic;
}
- auto ret = as_variable(baseType->cudnn_convolution_backward(self_, grad_output_, weight_, padding, stride, dilation, groups, benchmark, deterministic, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor result0, result1, result2;
+ std::tie(result0, result1, result2) = as_variable(baseType->cudnn_convolution_backward(self_, grad_output_, weight_, padding, stride, dilation, groups, benchmark, deterministic, output_mask));
+ set_history({ result0, result1, result2 }, grad_fn);
if (jit::tracer::isTracing( self, grad_output, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward", { self, grad_output, weight }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward", { self, grad_output, weight }, { result0, result1, result2 } );
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("dilation"), dilation);
@@ -9188,22 +9156,22 @@
setattr(n, jit::stringToSymbol("deterministic"), deterministic);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
}
Tensor VariableType::cudnn_convolution_backward_bias(const Tensor & grad_output) const {
profiler::RecordFunction profiler("cudnn_convolution_backward_bias");
- auto ret = Type::cudnn_convolution_backward_bias(grad_output);
+ auto result = Type::cudnn_convolution_backward_bias(grad_output);
if (jit::tracer::isTracing( grad_output )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_bias", { grad_output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_bias", { grad_output }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::cudnn_convolution_backward_weight(IntList weight_size, const Tensor & grad_output, const Tensor & self, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
profiler::RecordFunction profiler("cudnn_convolution_backward_weight");
- auto ret = Type::cudnn_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
+ auto result = Type::cudnn_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_weight", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_weight", { grad_output, self }, { result } );
setattr(n, jit::stringToSymbol("weight_size"), weight_size);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("stride"), stride);
@@ -9212,7 +9180,7 @@
setattr(n, jit::stringToSymbol("benchmark"), benchmark);
setattr(n, jit::stringToSymbol("deterministic"), deterministic);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
profiler::RecordFunction profiler("cudnn_convolution_transpose");
@@ -9220,8 +9188,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 2);
std::shared_ptr<CudnnConvolutionTransposeBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ self, weight, bias })) {
grad_fn = std::make_shared<CudnnConvolutionTransposeBackward>();
grad_fn->next_functions = compute_next_functions({ self, weight, bias });
grad_fn->self_ = SavedVariable(self, false);
@@ -9234,10 +9201,10 @@
grad_fn->benchmark = benchmark;
grad_fn->deterministic = deterministic;
}
- auto ret = as_variable(baseType->cudnn_convolution_transpose(self_, weight_, bias_, padding, output_padding, stride, dilation, groups, benchmark, deterministic));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->cudnn_convolution_transpose(self_, weight_, bias_, padding, output_padding, stride, dilation, groups, benchmark, deterministic));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose", { self, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose", { self, weight, bias }, { result } );
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("output_padding"), output_padding);
setattr(n, jit::stringToSymbol("stride"), stride);
@@ -9246,7 +9213,7 @@
setattr(n, jit::stringToSymbol("benchmark"), benchmark);
setattr(n, jit::stringToSymbol("deterministic"), deterministic);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward");
@@ -9254,8 +9221,7 @@
auto& grad_output_ = unpack(grad_output, "grad_output", 1);
auto& weight_ = unpack(weight, "weight", 2);
std::shared_ptr<CudnnConvolutionTransposeBackwardBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, grad_output, weight });
- if (requires_grad) {
+ if (compute_requires_grad({ self, grad_output, weight })) {
grad_fn = std::make_shared<CudnnConvolutionTransposeBackwardBackward>();
grad_fn->next_functions = compute_next_functions({ self, grad_output, weight });
grad_fn->self_ = SavedVariable(self, false);
@@ -9269,10 +9235,11 @@
grad_fn->benchmark = benchmark;
grad_fn->deterministic = deterministic;
}
- auto ret = as_variable(baseType->cudnn_convolution_transpose_backward(self_, grad_output_, weight_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor result0, result1, result2;
+ std::tie(result0, result1, result2) = as_variable(baseType->cudnn_convolution_transpose_backward(self_, grad_output_, weight_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, output_mask));
+ set_history({ result0, result1, result2 }, grad_fn);
if (jit::tracer::isTracing( self, grad_output, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward", { self, grad_output, weight }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward", { self, grad_output, weight }, { result0, result1, result2 } );
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("output_padding"), output_padding);
setattr(n, jit::stringToSymbol("stride"), stride);
@@ -9282,22 +9249,22 @@
setattr(n, jit::stringToSymbol("deterministic"), deterministic);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
}
Tensor VariableType::cudnn_convolution_transpose_backward_bias(const Tensor & grad_output) const {
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward_bias");
- auto ret = Type::cudnn_convolution_transpose_backward_bias(grad_output);
+ auto result = Type::cudnn_convolution_transpose_backward_bias(grad_output);
if (jit::tracer::isTracing( grad_output )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_bias", { grad_output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_bias", { grad_output }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::cudnn_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward_input");
- auto ret = Type::cudnn_convolution_transpose_backward_input(grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic);
+ auto result = Type::cudnn_convolution_transpose_backward_input(grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic);
if (jit::tracer::isTracing( grad_output, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_input", { grad_output, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_input", { grad_output, weight }, { result } );
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("dilation"), dilation);
@@ -9305,13 +9272,13 @@
setattr(n, jit::stringToSymbol("benchmark"), benchmark);
setattr(n, jit::stringToSymbol("deterministic"), deterministic);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::cudnn_convolution_transpose_backward_weight(IntList weight_size, const Tensor & grad_output, const Tensor & self, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const {
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward_weight");
- auto ret = Type::cudnn_convolution_transpose_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
+ auto result = Type::cudnn_convolution_transpose_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic);
if (jit::tracer::isTracing( grad_output, self )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_weight", { grad_output, self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_weight", { grad_output, self }, { result } );
setattr(n, jit::stringToSymbol("weight_size"), weight_size);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("stride"), stride);
@@ -9320,91 +9287,76 @@
setattr(n, jit::stringToSymbol("benchmark"), benchmark);
setattr(n, jit::stringToSymbol("deterministic"), deterministic);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::cudnn_grid_sampler(const Tensor & self, const Tensor & grid) const {
profiler::RecordFunction profiler("cudnn_grid_sampler");
auto& self_ = unpack(self, "self", 0);
auto& grid_ = unpack(grid, "grid", 1);
std::shared_ptr<CudnnGridSamplerBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self, grid });
- if (requires_grad) {
+ if (compute_requires_grad({ self, grid })) {
grad_fn = std::make_shared<CudnnGridSamplerBackward>();
grad_fn->next_functions = compute_next_functions({ self, grid });
grad_fn->self_ = SavedVariable(self, false);
grad_fn->grid_ = SavedVariable(grid, false);
}
- auto ret = as_variable(baseType->cudnn_grid_sampler(self_, grid_));
- set_history(ret, grad_fn);
+ auto output = as_variable(baseType->cudnn_grid_sampler(self_, grid_));
+ set_history(output, grad_fn);
if (jit::tracer::isTracing( self, grid )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_grid_sampler", { self, grid }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_grid_sampler", { self, grid }, { output } );
(void)n;
}
- return Tensor(std::move(ret));
+ return output;
}
std::tuple<Tensor,Tensor> VariableType::cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output) const {
profiler::RecordFunction profiler("cudnn_grid_sampler_backward");
- auto& self_ = unpack(self, "self", 0);
- auto& grid_ = unpack(grid, "grid", 1);
- auto& grad_output_ = unpack(grad_output, "grad_output", 2);
- std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ self, grid, grad_output });
- if (requires_grad) {
- grad_fn = std::make_shared<Error>("the derivative for cudnn_grid_sampler_backward is not implemented");
- grad_fn->next_functions = compute_next_functions({ self, grid, grad_output });
- }
- auto ret = as_variable(baseType->cudnn_grid_sampler_backward(self_, grid_, grad_output_));
- set_history({ std::get<0>(ret), std::get<1>(ret) }, grad_fn);
+ Tensor grad_self, grad_grid;
+ std::tie(grad_self, grad_grid) = Type::cudnn_grid_sampler_backward(self, grid, grad_output);
if (jit::tracer::isTracing( self, grid, grad_output )) {
- jit::Node *n = jit::tracer::recordTrace( "cudnn_grid_sampler_backward", { self, grid, grad_output }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "cudnn_grid_sampler_backward", { self, grid, grad_output }, { grad_self, grad_grid } );
(void)n;
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(grad_self), std::move(grad_grid));
}
Tensor VariableType::det(const Tensor & self) const {
profiler::RecordFunction profiler("det");
- auto ret = Type::det(self);
+ auto result = Type::det(self);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "det", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "det", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor,Tensor,Tensor> VariableType::_det_with_svd(const Tensor & self) const {
profiler::RecordFunction profiler("_det_with_svd");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<DetWithSvdBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<DetWithSvdBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_ = SavedVariable(self, false);
}
- auto ret = as_variable(baseType->_det_with_svd(self_));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret), std::get<3>(ret) }, grad_fn);
+ Tensor result0, result1, result2, result3;
+ std::tie(result0, result1, result2, result3) = as_variable(baseType->_det_with_svd(self_));
+ set_history({ result0, result1, result2, result3 }, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "_det_with_svd", { self }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret), std::get<3>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "_det_with_svd", { self }, { result0, result1, result2, result3 } );
(void)n;
}
if (grad_fn) {
- auto& result0 = std::get<0>(ret);
grad_fn->result0_ = SavedVariable(result0, true);
- auto& result1 = std::get<1>(ret);
grad_fn->result1_ = SavedVariable(result1, true);
- auto& result2 = std::get<2>(ret);
grad_fn->result2_ = SavedVariable(result2, true);
- auto& result3 = std::get<3>(ret);
grad_fn->result3_ = SavedVariable(result3, true);
}
- return std::tuple<Tensor,Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1), std::move(result2), std::move(result3));
}
Tensor VariableType::embedding(const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const {
profiler::RecordFunction profiler("embedding");
auto& weight_ = unpack(weight, "weight", 0);
auto& indices_ = unpack_long(indices, "indices", 1);
std::shared_ptr<EmbeddingBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ weight });
- if (requires_grad) {
+ if (compute_requires_grad({ weight })) {
grad_fn = std::make_shared<EmbeddingBackward>();
grad_fn->next_functions = compute_next_functions({ weight });
grad_fn->weight_argsize_0 = weight.size(0);
@@ -9413,56 +9365,47 @@
grad_fn->scale_grad_by_freq = scale_grad_by_freq;
grad_fn->sparse = sparse;
}
- auto ret = as_variable(baseType->embedding(weight_, indices_, padding_idx, scale_grad_by_freq, sparse));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->embedding(weight_, indices_, padding_idx, scale_grad_by_freq, sparse));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( weight, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "embedding", { weight, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "embedding", { weight, indices }, { result } );
setattr(n, jit::stringToSymbol("padding_idx"), padding_idx);
setattr(n, jit::stringToSymbol("scale_grad_by_freq"), scale_grad_by_freq);
setattr(n, jit::stringToSymbol("sparse"), sparse);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::embedding_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const {
profiler::RecordFunction profiler("embedding_backward");
- auto& grad_ = unpack(grad, "grad", 0);
- auto& indices_ = unpack_long(indices, "indices", 1);
- std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ grad, indices });
- if (requires_grad) {
- grad_fn = std::make_shared<Error>("the derivative for embedding_backward is not implemented");
- grad_fn->next_functions = compute_next_functions({ grad, indices });
- }
- auto ret = as_variable(baseType->embedding_backward(grad_, indices_, num_weights, padding_idx, scale_grad_by_freq, sparse));
- set_history({ ret }, grad_fn);
+ auto result = Type::embedding_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
if (jit::tracer::isTracing( grad, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "embedding_backward", { grad, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "embedding_backward", { grad, indices }, { result } );
setattr(n, jit::stringToSymbol("num_weights"), num_weights);
setattr(n, jit::stringToSymbol("padding_idx"), padding_idx);
setattr(n, jit::stringToSymbol("scale_grad_by_freq"), scale_grad_by_freq);
setattr(n, jit::stringToSymbol("sparse"), sparse);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::embedding_dense_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) const {
profiler::RecordFunction profiler("embedding_dense_backward");
auto& grad_ = unpack(grad, "grad", 0);
auto& indices_ = unpack_long(indices, "indices", 1);
std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ grad, indices });
- if (requires_grad) {
+ if (compute_requires_grad({ grad, indices })) {
grad_fn = std::make_shared<Error>("the derivative for embedding_dense_backward is not implemented");
grad_fn->next_functions = compute_next_functions({ grad, indices });
+
}
- auto ret = as_variable(baseType->embedding_dense_backward(grad_, indices_, num_weights, padding_idx, scale_grad_by_freq));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->embedding_dense_backward(grad_, indices_, num_weights, padding_idx, scale_grad_by_freq));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( grad, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "embedding_dense_backward", { grad, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "embedding_dense_backward", { grad, indices }, { result } );
setattr(n, jit::stringToSymbol("num_weights"), num_weights);
setattr(n, jit::stringToSymbol("padding_idx"), padding_idx);
setattr(n, jit::stringToSymbol("scale_grad_by_freq"), scale_grad_by_freq);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) const {
profiler::RecordFunction profiler("embedding_renorm_");
@@ -9470,14 +9413,14 @@
auto& indices_ = unpack_long(indices, "indices", 1);
check_inplace(self);
std::shared_ptr<EmbeddingRenormBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<EmbeddingRenormBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
baseType->embedding_renorm_(self_, indices_, max_norm, norm_type);
increment_version(self);
- rebase_history(static_cast<Variable&>(self), grad_fn);
+ rebase_history(self, grad_fn);
if (jit::tracer::isTracing( self, indices )) {
jit::Node *n = jit::tracer::recordTrace( "embedding_renorm", { self, indices }, { self } );
setattr(n, jit::stringToSymbol("max_norm"), max_norm);
@@ -9487,63 +9430,53 @@
}
Tensor VariableType::embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) const {
profiler::RecordFunction profiler("embedding_sparse_backward");
- auto& grad_ = unpack(grad, "grad", 0);
- auto& indices_ = unpack_long(indices, "indices", 1);
- std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ grad, indices });
- if (requires_grad) {
- grad_fn = std::make_shared<Error>("the derivative for embedding_sparse_backward is not implemented");
- grad_fn->next_functions = compute_next_functions({ grad, indices });
- }
- auto ret = as_variable(baseType->embedding_sparse_backward(grad_, indices_, num_weights, padding_idx, scale_grad_by_freq));
- set_history({ ret }, grad_fn);
+ auto result = Type::embedding_sparse_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
if (jit::tracer::isTracing( grad, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "embedding_sparse_backward", { grad, indices }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "embedding_sparse_backward", { grad, indices }, { result } );
setattr(n, jit::stringToSymbol("num_weights"), num_weights);
setattr(n, jit::stringToSymbol("padding_idx"), padding_idx);
setattr(n, jit::stringToSymbol("scale_grad_by_freq"), scale_grad_by_freq);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::expand(const Tensor & self, IntList size) const {
profiler::RecordFunction profiler("expand");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ExpandBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<ExpandBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->expand(self_, size));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->expand(self_, size));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "expand", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "expand", { self }, { result } );
setattr(n, jit::stringToSymbol("size"), size);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::expand_as(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("expand_as");
- auto ret = Type::expand_as(self, other);
+ auto result = Type::expand_as(self, other);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "expand_as", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "expand_as", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::index(const Tensor & self, TensorList indices) const {
profiler::RecordFunction profiler("index");
- auto ret = Type::index(self, indices);
+ auto result = Type::index(self, indices);
if (jit::tracer::isTracing( self, indices )) {
- jit::Node *n = jit::tracer::recordTrace( "index", flatten( self, indices ), { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "index", flatten( self, indices ), { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::index_put_(Tensor & self, TensorList indices, const Tensor & values) const {
profiler::RecordFunction profiler("index_put_");
- auto ret = Type::index_put_(self, indices, values);
+ Type::index_put_(self, indices, values);
if (jit::tracer::isTracing( self, indices, values )) {
jit::Node *n = jit::tracer::recordTrace( "index_put", flatten( self, indices, values ), { self } );
(void)n;
@@ -9551,62 +9484,63 @@
return self;
}
bool VariableType::is_cuda(const Tensor & self) const {
- auto& self_ = unpack(self, "self", 0);
- return baseType->is_cuda(self_);
+ auto result = Type::is_cuda(self);
+ return result;
}
bool VariableType::is_distributed(const Tensor & self) const {
- auto& self_ = unpack(self, "self", 0);
- return baseType->is_distributed(self_);
+ auto result = Type::is_distributed(self);
+ return result;
}
bool VariableType::is_nonzero(const Tensor & self) const {
- auto& self_ = unpack(self, "self", 0);
- return baseType->is_nonzero(self_);
+ profiler::RecordFunction profiler("is_nonzero");
+ auto result = Type::is_nonzero(self);
+ return result;
}
bool VariableType::is_same_size(const Tensor & self, const Tensor & other) const {
- auto& self_ = unpack(self, "self", 0);
- auto& other_ = unpack(other, "other", 1);
- return baseType->is_same_size(self_, other_);
+ auto result = Type::is_same_size(self, other);
+ return result;
}
bool VariableType::is_signed(const Tensor & self) const {
- auto& self_ = unpack(self, "self", 0);
- return baseType->is_signed(self_);
+ auto result = Type::is_signed(self);
+ return result;
}
bool VariableType::is_sparse(const Tensor & self) const {
- auto& self_ = unpack(self, "self", 0);
- return baseType->is_sparse(self_);
+ auto result = Type::is_sparse(self);
+ return result;
}
Tensor VariableType::matmul(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("matmul");
- auto ret = Type::matmul(self, other);
+ auto result = Type::matmul(self, other);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "matmul", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "matmul", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor> VariableType::max_pool1d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const {
profiler::RecordFunction profiler("max_pool1d");
- auto ret = Type::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
+ Tensor result0, result1;
+ std::tie(result0, result1) = Type::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "max_pool1d", { self }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "max_pool1d", { self }, { result0, result1 } );
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size);
setattr(n, jit::stringToSymbol("stride"), stride);
setattr(n, jit::stringToSymbol("padding"), padding);
setattr(n, jit::stringToSymbol("dilation"), dilation);
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1));
}
Tensor VariableType::narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length) const {
profiler::RecordFunction profiler("narrow");
- auto ret = Type::narrow(self, dim, start, length);
+ auto result = Type::narrow(self, dim, start, length);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "narrow", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "narrow", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("start"), start);
setattr(n, jit::stringToSymbol("length"), length);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t kW, int64_t kH, int64_t padW, int64_t padH) const {
profiler::RecordFunction profiler("nnpack_spatial_convolution");
@@ -9614,8 +9548,7 @@
auto& weight_ = unpack(weight, "weight", 1);
auto bias_ = unpack_opt(bias, "bias", 2);
std::shared_ptr<NnpackSpatialConvolutionBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ input, weight, bias });
- if (requires_grad) {
+ if (compute_requires_grad({ input, weight, bias })) {
grad_fn = std::make_shared<NnpackSpatialConvolutionBackward>();
grad_fn->next_functions = compute_next_functions({ input, weight, bias });
grad_fn->input_ = SavedVariable(input, false);
@@ -9626,91 +9559,81 @@
grad_fn->padH = padH;
grad_fn->weight_sizes = weight.sizes();
}
- auto ret = as_variable(baseType->nnpack_spatial_convolution(input_, weight_, bias_, kW, kH, padW, padH));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->nnpack_spatial_convolution(input_, weight_, bias_, kW, kH, padW, padH));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( input, weight, bias )) {
- jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution", { input, weight, bias }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution", { input, weight, bias }, { result } );
setattr(n, jit::stringToSymbol("kW"), kW);
setattr(n, jit::stringToSymbol("kH"), kH);
setattr(n, jit::stringToSymbol("padW"), padW);
setattr(n, jit::stringToSymbol("padH"), padH);
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor,Tensor> VariableType::nnpack_spatial_convolution_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, int64_t kW, int64_t kH, int64_t padW, int64_t padH, std::array<bool,3> output_mask) const {
profiler::RecordFunction profiler("nnpack_spatial_convolution_backward");
- auto& input_ = unpack(input, "input", 0);
- auto& grad_output_ = unpack(grad_output, "grad_output", 1);
- auto& weight_ = unpack(weight, "weight", 2);
- std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ input, grad_output, weight });
- if (requires_grad) {
- grad_fn = std::make_shared<Error>("the derivative for nnpack_spatial_convolution_backward is not implemented");
- grad_fn->next_functions = compute_next_functions({ input, grad_output, weight });
- }
- auto ret = as_variable(baseType->nnpack_spatial_convolution_backward(input_, grad_output_, weight_, kW, kH, padW, padH, output_mask));
- set_history({ std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) }, grad_fn);
+ Tensor result0, result1, result2;
+ std::tie(result0, result1, result2) = Type::nnpack_spatial_convolution_backward(input, grad_output, weight, kW, kH, padW, padH, output_mask);
if (jit::tracer::isTracing( input, grad_output, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward", { input, grad_output, weight }, { std::get<0>(ret), std::get<1>(ret), std::get<2>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward", { input, grad_output, weight }, { result0, result1, result2 } );
setattr(n, jit::stringToSymbol("kW"), kW);
setattr(n, jit::stringToSymbol("kH"), kH);
setattr(n, jit::stringToSymbol("padW"), padW);
setattr(n, jit::stringToSymbol("padH"), padH);
setattr(n, jit::stringToSymbol("output_mask"), output_mask);
}
- return std::tuple<Tensor,Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
}
Tensor VariableType::nnpack_spatial_convolution_backward_input(const Tensor & input, const Tensor & grad_output, const Tensor & weight, int64_t kW, int64_t kH, int64_t padW, int64_t padH) const {
profiler::RecordFunction profiler("nnpack_spatial_convolution_backward_input");
- auto ret = Type::nnpack_spatial_convolution_backward_input(input, grad_output, weight, kW, kH, padW, padH);
+ auto result = Type::nnpack_spatial_convolution_backward_input(input, grad_output, weight, kW, kH, padW, padH);
if (jit::tracer::isTracing( input, grad_output, weight )) {
- jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward_input", { input, grad_output, weight }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward_input", { input, grad_output, weight }, { result } );
setattr(n, jit::stringToSymbol("kW"), kW);
setattr(n, jit::stringToSymbol("kH"), kH);
setattr(n, jit::stringToSymbol("padW"), padW);
setattr(n, jit::stringToSymbol("padH"), padH);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::nnpack_spatial_convolution_backward_weight(const Tensor & input, IntList weight_size, const Tensor & grad_output, int64_t kW, int64_t kH, int64_t padW, int64_t padH) const {
profiler::RecordFunction profiler("nnpack_spatial_convolution_backward_weight");
- auto ret = Type::nnpack_spatial_convolution_backward_weight(input, weight_size, grad_output, kW, kH, padW, padH);
+ auto result = Type::nnpack_spatial_convolution_backward_weight(input, weight_size, grad_output, kW, kH, padW, padH);
if (jit::tracer::isTracing( input, grad_output )) {
- jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward_weight", { input, grad_output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward_weight", { input, grad_output }, { result } );
setattr(n, jit::stringToSymbol("weight_size"), weight_size);
setattr(n, jit::stringToSymbol("kW"), kW);
setattr(n, jit::stringToSymbol("kH"), kH);
setattr(n, jit::stringToSymbol("padW"), padW);
setattr(n, jit::stringToSymbol("padH"), padH);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::permute(const Tensor & self, IntList dims) const {
profiler::RecordFunction profiler("permute");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<PermuteBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<PermuteBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->dims = dims;
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->permute(self_, dims));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->permute(self_, dims));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "permute", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "permute", { self }, { result } );
setattr(n, jit::stringToSymbol("dims"), dims);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::pin_memory(const Tensor & self) const {
profiler::RecordFunction profiler("pin_memory");
- auto ret = Type::pin_memory(self);
+ auto result = Type::pin_memory(self);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "pin_memory", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "pin_memory", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
std::tuple<Tensor,Tensor> VariableType::RoiPooling2d_forward(const Tensor & input, const Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale) const {
profiler::RecordFunction profiler("RoiPooling2d_forward");
@@ -9718,8 +9641,7 @@
auto& rois_ = unpack(rois, "rois", 1);
check_no_requires_grad(rois, "rois");
std::shared_ptr<Roipooling2DBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ input });
- if (requires_grad) {
+ if (compute_requires_grad({ input })) {
grad_fn = std::make_shared<Roipooling2DBackward>();
grad_fn->next_functions = compute_next_functions({ input });
grad_fn->input_ = SavedVariable(input, false);
@@ -9728,19 +9650,19 @@
grad_fn->pooledWidth = pooledWidth;
grad_fn->spatialScale = spatialScale;
}
- auto ret = as_variable(baseType->RoiPooling2d_forward(input_, rois_, pooledHeight, pooledWidth, spatialScale));
- set_history(std::get<0>(ret), grad_fn);
+ Tensor result0, result1;
+ std::tie(result0, result1) = as_variable(baseType->RoiPooling2d_forward(input_, rois_, pooledHeight, pooledWidth, spatialScale));
+ set_history(result0, grad_fn);
if (jit::tracer::isTracing( input, rois )) {
- jit::Node *n = jit::tracer::recordTrace( "RoiPooling2d_forward", { input, rois }, { std::get<0>(ret), std::get<1>(ret) } );
+ jit::Node *n = jit::tracer::recordTrace( "RoiPooling2d_forward", { input, rois }, { result0, result1 } );
setattr(n, jit::stringToSymbol("pooledHeight"), pooledHeight);
setattr(n, jit::stringToSymbol("pooledWidth"), pooledWidth);
setattr(n, jit::stringToSymbol("spatialScale"), spatialScale);
}
if (grad_fn) {
- auto& result1 = std::get<1>(ret);
grad_fn->result1_ = SavedVariable(result1, true);
}
- return std::tuple<Tensor,Tensor>(std::move(ret));
+ return std::make_tuple(std::move(result0), std::move(result1));
}
Tensor VariableType::RoiPooling2d_backward(const Tensor & input, const Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale, const Tensor & gradOutput, const Tensor & argmaxes) const {
profiler::RecordFunction profiler("RoiPooling2d_backward");
@@ -9749,53 +9671,53 @@
auto& gradOutput_ = unpack(gradOutput, "gradOutput", 5);
auto& argmaxes_ = unpack(argmaxes, "argmaxes", 6);
std::shared_ptr<Error> grad_fn;
- auto requires_grad = compute_requires_grad({ input, rois, gradOutput, argmaxes });
- if (requires_grad) {
+ if (compute_requires_grad({ input, rois, gradOutput, argmaxes })) {
grad_fn = std::make_shared<Error>("the derivative for RoiPooling2d_backward is not implemented");
grad_fn->next_functions = compute_next_functions({ input, rois, gradOutput, argmaxes });
+
}
- auto ret = as_variable(baseType->RoiPooling2d_backward(input_, rois_, pooledHeight, pooledWidth, spatialScale, gradOutput_, argmaxes_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->RoiPooling2d_backward(input_, rois_, pooledHeight, pooledWidth, spatialScale, gradOutput_, argmaxes_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( input, rois, gradOutput, argmaxes )) {
- jit::Node *n = jit::tracer::recordTrace( "RoiPooling2d_backward", { input, rois, gradOutput, argmaxes }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "RoiPooling2d_backward", { input, rois, gradOutput, argmaxes }, { result } );
setattr(n, jit::stringToSymbol("pooledHeight"), pooledHeight);
setattr(n, jit::stringToSymbol("pooledWidth"), pooledWidth);
setattr(n, jit::stringToSymbol("spatialScale"), spatialScale);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
profiler::RecordFunction profiler("rrelu");
- auto ret = Type::rrelu(self, lower, upper, training, generator);
- return Tensor(std::move(ret));
+ auto result = Type::rrelu(self, lower, upper, training, generator);
+ return result;
}
Tensor & VariableType::rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const {
profiler::RecordFunction profiler("rrelu_");
- auto ret = Type::rrelu_(self, lower, upper, training, generator);
+ Type::rrelu_(self, lower, upper, training, generator);
return self;
}
Tensor VariableType::select(const Tensor & self, int64_t dim, int64_t index) const {
profiler::RecordFunction profiler("select");
- auto ret = Type::select(self, dim, index);
+ auto result = Type::select(self, dim, index);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "select", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "select", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("index"), index);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::selu(const Tensor & self) const {
profiler::RecordFunction profiler("selu");
- auto ret = Type::selu(self);
+ auto result = Type::selu(self);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "selu", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "selu", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::selu_(Tensor & self) const {
profiler::RecordFunction profiler("selu_");
- auto ret = Type::selu_(self);
+ Type::selu_(self);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "selu", { self }, { self } );
(void)n;
@@ -9803,27 +9725,26 @@
return self;
}
int64_t VariableType::size(const Tensor & self, int64_t dim) const {
- auto& self_ = unpack(self, "self", 0);
- return baseType->size(self_, dim);
+ auto result = Type::size(self, dim);
+ return result;
}
Tensor VariableType::slice(const Tensor & self, int64_t dim, int64_t start, int64_t end, int64_t step) const {
profiler::RecordFunction profiler("slice");
- auto ret = Type::slice(self, dim, start, end, step);
+ auto result = Type::slice(self, dim, start, end, step);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "slice", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "slice", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
setattr(n, jit::stringToSymbol("start"), start);
setattr(n, jit::stringToSymbol("end"), end);
setattr(n, jit::stringToSymbol("step"), step);
}
- return Tensor(std::move(ret));
+ return result;
}
std::vector<Tensor> VariableType::split(const Tensor & self, int64_t split_size, int64_t dim) const {
profiler::RecordFunction profiler("split");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SplitBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SplitBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
@@ -9831,60 +9752,57 @@
grad_fn->split_size = split_size;
grad_fn->dim = dim;
}
- auto ret = as_variable(baseType->split(self_, split_size, dim));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->split(self_, split_size, dim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "split", { self }, ret );
+ jit::Node *n = jit::tracer::recordTrace( "split", { self }, flatten(result) );
setattr(n, jit::stringToSymbol("split_size"), split_size);
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return as_tensor_list(ret);
+ return result;
}
Tensor VariableType::squeeze(const Tensor & self) const {
profiler::RecordFunction profiler("squeeze");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SqueezeBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SqueezeBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->squeeze(self_));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->squeeze(self_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::squeeze(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("squeeze");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SqueezeBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SqueezeBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
grad_fn->self_argsize_dim = self.size(dim);
grad_fn->dim = dim;
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->squeeze(self_, dim));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->squeeze(self_, dim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::squeeze_(Tensor & self) const {
profiler::RecordFunction profiler("squeeze_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<SqueezeBackward0> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SqueezeBackward0>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
@@ -9892,7 +9810,7 @@
baseType->squeeze_(self_);
ensure_no_aten_scalars(self);
increment_version(self);
- set_history(static_cast<Variable&>(self), grad_fn);
+ set_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { self } );
(void)n;
@@ -9904,8 +9822,7 @@
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<SqueezeBackward1> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<SqueezeBackward1>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->self_sizes = self.sizes();
@@ -9915,7 +9832,7 @@
baseType->squeeze_(self_, dim);
ensure_no_aten_scalars(self);
increment_version(self);
- set_history(static_cast<Variable&>(self), grad_fn);
+ set_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { self } );
setattr(n, jit::stringToSymbol("dim"), dim);
@@ -9924,64 +9841,62 @@
}
Tensor VariableType::stack(TensorList tensors, int64_t dim) const {
profiler::RecordFunction profiler("stack");
- auto ret = Type::stack(tensors, dim);
+ auto result = Type::stack(tensors, dim);
if (jit::tracer::isTracing( tensors )) {
- jit::Node *n = jit::tracer::recordTrace( "stack", flatten( tensors ), { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "stack", flatten( tensors ), { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::stft(const Tensor & self, int64_t frame_length, int64_t hop, int64_t fft_size, bool return_onesided, const Tensor & window, int64_t pad_end) const {
profiler::RecordFunction profiler("stft");
- auto ret = Type::stft(self, frame_length, hop, fft_size, return_onesided, window, pad_end);
+ auto result = Type::stft(self, frame_length, hop, fft_size, return_onesided, window, pad_end);
if (jit::tracer::isTracing( self, window )) {
- jit::Node *n = jit::tracer::recordTrace( "stft", { self, window }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "stft", { self, window }, { result } );
setattr(n, jit::stringToSymbol("frame_length"), frame_length);
setattr(n, jit::stringToSymbol("hop"), hop);
setattr(n, jit::stringToSymbol("fft_size"), fft_size);
setattr(n, jit::stringToSymbol("return_onesided"), return_onesided);
setattr(n, jit::stringToSymbol("pad_end"), pad_end);
}
- return Tensor(std::move(ret));
+ return result;
}
int64_t VariableType::stride(const Tensor & self, int64_t dim) const {
- auto& self_ = unpack(self, "self", 0);
- return baseType->stride(self_, dim);
+ auto result = Type::stride(self, dim);
+ return result;
}
Tensor VariableType::type_as(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("type_as");
- auto ret = Type::type_as(self, other);
+ auto result = Type::type_as(self, other);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "type_as", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "type_as", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::unsqueeze(const Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("unsqueeze");
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UnsqueezeBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UnsqueezeBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->dim = dim;
}
- auto ret = as_view(static_cast<const Variable&>(self), baseType->unsqueeze(self_, dim));
- set_history(ret, grad_fn);
+ auto result = as_view(self, baseType->unsqueeze(self_, dim));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self )) {
- jit::Node *n = jit::tracer::recordTrace( "unsqueeze", { self }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "unsqueeze", { self }, { result } );
setattr(n, jit::stringToSymbol("dim"), dim);
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor & VariableType::unsqueeze_(Tensor & self, int64_t dim) const {
profiler::RecordFunction profiler("unsqueeze_");
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
std::shared_ptr<UnsqueezeBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<UnsqueezeBackward>();
grad_fn->next_functions = compute_next_functions({ self });
grad_fn->dim = dim;
@@ -9989,7 +9904,7 @@
baseType->unsqueeze_(self_, dim);
ensure_no_aten_scalars(self);
increment_version(self);
- set_history(static_cast<Variable&>(self), grad_fn);
+ set_history(self, grad_fn);
if (jit::tracer::isTracing( self )) {
jit::Node *n = jit::tracer::recordTrace( "unsqueeze", { self }, { self } );
setattr(n, jit::stringToSymbol("dim"), dim);
@@ -9998,21 +9913,21 @@
}
Tensor VariableType::view_as(const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("view_as");
- auto ret = Type::view_as(self, other);
+ auto result = Type::view_as(self, other);
if (jit::tracer::isTracing( self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "view_as", { self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "view_as", { self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::where(const Tensor & condition, const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("where");
- auto ret = Type::where(condition, self, other);
+ auto result = Type::where(condition, self, other);
if (jit::tracer::isTracing( condition, self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "where", { condition, self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "where", { condition, self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::_s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const {
profiler::RecordFunction profiler("_s_where");
@@ -10020,38 +9935,37 @@
auto& self_ = unpack(self, "self", 1);
auto& other_ = unpack(other, "other", 2);
std::shared_ptr<SWhereBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ condition, self, other });
- if (requires_grad) {
+ if (compute_requires_grad({ condition, self, other })) {
grad_fn = std::make_shared<SWhereBackward>();
grad_fn->next_functions = compute_next_functions({ condition, self, other });
grad_fn->condition_info = condition;
grad_fn->condition_ = SavedVariable(condition, false);
}
- auto ret = as_variable(baseType->_s_where(condition_, self_, other_));
- set_history(ret, grad_fn);
+ auto result = as_variable(baseType->_s_where(condition_, self_, other_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( condition, self, other )) {
- jit::Node *n = jit::tracer::recordTrace( "_s_where", { condition, self, other }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "_s_where", { condition, self, other }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
Tensor VariableType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const {
profiler::RecordFunction profiler("_standard_gamma_grad");
auto& self_ = unpack(self, "self", 0);
auto& output_ = unpack(output, "output", 1);
std::shared_ptr<StandardGammaGradBackward> grad_fn;
- auto requires_grad = compute_requires_grad({ self });
- if (requires_grad) {
+ if (compute_requires_grad({ self })) {
grad_fn = std::make_shared<StandardGammaGradBackward>();
grad_fn->next_functions = compute_next_functions({ self });
+
}
- auto ret = as_variable(baseType->_standard_gamma_grad(self_, output_));
- set_history({ ret }, grad_fn);
+ auto result = as_variable(baseType->_standard_gamma_grad(self_, output_));
+ set_history(result, grad_fn);
if (jit::tracer::isTracing( self, output )) {
- jit::Node *n = jit::tracer::recordTrace( "_standard_gamma_grad", { self, output }, { ret } );
+ jit::Node *n = jit::tracer::recordTrace( "_standard_gamma_grad", { self, output }, { result } );
(void)n;
}
- return Tensor(std::move(ret));
+ return result;
}
}} // namespace torch::autograd