Created
January 9, 2018 20:46
-
-
Save colesbury/435bc8efb7841012478d508bc9a50566 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
--- VariableType.cpp 2018-01-09 12:45:35.044936501 -0800 | |
+++ ./torch/csrc/autograd/generated/VariableType.cpp 2018-01-09 12:45:57.906005558 -0800 | |
@@ -289,41 +289,67 @@ | |
} | |
} | |
-static void rebase_history(Tensor& tensor, std::shared_ptr<Function> grad_fn, int output_nr=0) { | |
- if (!tensor.defined()) { | |
- return; | |
+static void check_output_args(const char* name, TensorList tensors) { | |
+ // Our logic for modifications to views only works for in-place functions, | |
+ // not out=... functions. Checks that no output arguments are views. | |
+ for (auto& tensor : tensors) { | |
+ if (tensor.defined()) { | |
+ auto& var = static_cast<const Variable&>(tensor); | |
+ if (var.is_view()) { | |
+ at::runtime_error( | |
+ "%s(): output arguments (out=...) must not be views in " | |
+ "differentiable functions", name); | |
+ } | |
} | |
+ } | |
+} | |
+ | |
+static void rebase_history(Tensor& tensor, std::shared_ptr<Function> grad_fn) { | |
+ if (grad_fn && tensor.defined()) { | |
auto& var = static_cast<Variable&>(tensor); | |
- if (grad_fn) { | |
grad_fn->num_inputs = 1; | |
- var.rebase_history(output_nr, std::move(grad_fn)); | |
+ var.rebase_history(0, std::move(grad_fn)); | |
+ } | |
+} | |
+ | |
+static void rebase_history(TensorList tensors, std::shared_ptr<Function> grad_fn) { | |
+ if (grad_fn) { | |
+ int output_nr = 0; | |
+ for (auto& tensor : tensors) { | |
+ if (tensor.defined()) { | |
+ auto& var = static_cast<Variable&>(const_cast<Tensor&>(tensor)); | |
+ var.rebase_history(output_nr, grad_fn); | |
+ output_nr++; | |
+ } | |
+ } | |
+ grad_fn->num_inputs = output_nr; | |
} | |
} | |
// var must be the only differentiable output of the function. Use the ArrayRef | |
// overload for functions with multiple differentiable outputs. | |
-static void set_history(Tensor& t, std::shared_ptr<Function> grad_fn, int output_nr=0) { | |
- auto& var = static_cast<Variable&>(t); | |
- if (grad_fn) { | |
+static void set_history(Tensor& tensor, std::shared_ptr<Function> grad_fn) { | |
+ if (grad_fn && tensor.defined()) { | |
+ auto& var = static_cast<Variable&>(tensor); | |
grad_fn->num_inputs = 1; | |
- var.get()->output_nr = output_nr; | |
+ var.get()->output_nr = 0; | |
var.get()->_grad_fn = std::move(grad_fn); | |
} | |
} | |
-static void set_history(at::ArrayRef<Tensor> tl, std::shared_ptr<Function> grad_fn) { | |
+static void set_history(TensorList tensors, std::shared_ptr<Function> grad_fn) { | |
if (grad_fn) { | |
- grad_fn->num_inputs = tl.size(); | |
int64_t output_nr = 0; | |
- for (auto& t : tl) { | |
- if (!t.defined()) continue; | |
- // TODO: combine this with the Variable construction | |
- auto& var = static_cast<const Variable&>(t); | |
+ for (auto& tensor : tensors) { | |
+ if (tensor.defined()) { | |
+ auto& var = static_cast<Variable&>(const_cast<Tensor&>(tensor)); | |
var.get()->output_nr = output_nr; | |
var.get()->_grad_fn = grad_fn; | |
output_nr++; | |
} | |
} | |
+ grad_fn->num_inputs = output_nr; | |
+ } | |
} | |
static at::ArrayRef<Variable> flatten(TensorList tensors) { | |
@@ -375,7 +401,7 @@ | |
} | |
baseType->s_copy_(self_, src_, async); | |
increment_version(self); | |
- rebase_history(static_cast<Variable&>(self), std::move(grad_fn)); | |
+ rebase_history(self, std::move(grad_fn)); | |
return self; | |
} | |
@@ -413,11 +439,32 @@ | |
auto result = baseType->storage_offset(self_); | |
return result; | |
} | |
+Tensor & VariableType::zeros_out(Tensor & result, IntList size) const { | |
+ profiler::RecordFunction profiler("zeros_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->zeros_out(result_, size); | |
+ if (jit::tracer::isTracing( result )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "zeros_out", { result }, {result} ); | |
+ setattr(n, jit::stringToSymbol("size"), size); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::zeros(IntList size) const { | |
profiler::RecordFunction profiler("zeros"); | |
auto result = as_variable(baseType->zeros(size)); | |
return result; | |
} | |
+Tensor & VariableType::zeros_like_out(Tensor & result, const Tensor & input) const { | |
+ profiler::RecordFunction profiler("zeros_like_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& input_ = unpack(input, "input", 1); | |
+ baseType->zeros_like_out(result_, input_); | |
+ if (jit::tracer::isTracing( result, input )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "zeros_like_out", { result, input }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::zeros_like(const Tensor & input) const { | |
profiler::RecordFunction profiler("zeros_like"); | |
auto& input_ = unpack(input, "input", 0); | |
@@ -428,11 +475,32 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::ones_out(Tensor & result, IntList size) const { | |
+ profiler::RecordFunction profiler("ones_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->ones_out(result_, size); | |
+ if (jit::tracer::isTracing( result )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "ones_out", { result }, {result} ); | |
+ setattr(n, jit::stringToSymbol("size"), size); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::ones(IntList size) const { | |
profiler::RecordFunction profiler("ones"); | |
auto result = as_variable(baseType->ones(size)); | |
return result; | |
} | |
+Tensor & VariableType::ones_like_out(Tensor & result, const Tensor & input) const { | |
+ profiler::RecordFunction profiler("ones_like_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& input_ = unpack(input, "input", 1); | |
+ baseType->ones_like_out(result_, input_); | |
+ if (jit::tracer::isTracing( result, input )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "ones_like_out", { result, input }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::ones_like(const Tensor & input) const { | |
profiler::RecordFunction profiler("ones_like"); | |
auto& input_ = unpack(input, "input", 0); | |
@@ -594,10 +656,9 @@ | |
auto& value_ = unpack(value, "value", 2); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
- if (compute_requires_grad({ self, mask, value })) { | |
+ if (compute_requires_grad({ self, value })) { | |
grad_fn = std::make_shared<Error>("the derivative for masked_fill_ is not implemented"); | |
- grad_fn->next_functions = compute_next_functions({ self, mask, value }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ self, value }); | |
} | |
baseType->s_masked_fill_(self_, mask_, value_); | |
increment_version(self); | |
@@ -630,6 +691,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::s_masked_select_out(Tensor & result, const Tensor & self, const Tensor & mask) const { | |
+ profiler::RecordFunction profiler("masked_select_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& mask_ = unpack_byte(mask, "mask", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<MaskedSelectBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("masked_select_out", { result }); | |
+ grad_fn = std::make_shared<MaskedSelectBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_info = self; | |
+ grad_fn->mask_ = SavedVariable(mask, false); | |
+ } | |
+ baseType->s_masked_select_out(result_, self_, mask_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, mask )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "masked_select_out", { result, self, mask }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_masked_select(const Tensor & self, const Tensor & mask) const { | |
profiler::RecordFunction profiler("masked_select"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -727,6 +809,17 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::nonzero_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("nonzero_out"); | |
+ auto& result_ = unpack_long(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->nonzero_out(result_, self_); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "nonzero_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::nonzero(const Tensor & self) const { | |
profiler::RecordFunction profiler("nonzero"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -771,6 +863,30 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::index_select_out(Tensor & result, const Tensor & self, int64_t dim, const Tensor & index) const { | |
+ profiler::RecordFunction profiler("index_select_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& index_ = unpack_long(index, "index", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<IndexSelectBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("index_select_out", { result }); | |
+ grad_fn = std::make_shared<IndexSelectBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ grad_fn->index_ = SavedVariable(index, false); | |
+ } | |
+ baseType->index_select_out(result_, self_, dim, index_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, index )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "index_select_out", { result, self, index }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::index_select(const Tensor & self, int64_t dim, const Tensor & index) const { | |
profiler::RecordFunction profiler("index_select"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -813,6 +929,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::take_out(Tensor & result, const Tensor & self, const Tensor & index) const { | |
+ profiler::RecordFunction profiler("take_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& index_ = unpack_long(index, "index", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<TakeBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("take_out", { result }); | |
+ grad_fn = std::make_shared<TakeBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_info = self; | |
+ grad_fn->index_ = SavedVariable(index, false); | |
+ } | |
+ baseType->take_out(result_, self_, index_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, index )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "take_out", { result, self, index }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::take(const Tensor & self, const Tensor & index) const { | |
profiler::RecordFunction profiler("take"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -906,10 +1045,9 @@ | |
auto& value_ = unpack(value, "value", 3); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
- if (compute_requires_grad({ self, index, value })) { | |
+ if (compute_requires_grad({ self, value })) { | |
grad_fn = std::make_shared<Error>("the derivative for index_fill_ is not implemented"); | |
- grad_fn->next_functions = compute_next_functions({ self, index, value }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ self, value }); | |
} | |
baseType->index_fill_(self_, dim, index_, value_); | |
increment_version(self); | |
@@ -942,16 +1080,50 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::range_out(Tensor & result, Scalar start, Scalar end, Scalar step) const { | |
+ profiler::RecordFunction profiler("range_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->range_out(result_, start, end, step); | |
+ if (jit::tracer::isTracing( result )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "range_out", { result }, {result} ); | |
+ setattr(n, jit::stringToSymbol("start"), start); | |
+ setattr(n, jit::stringToSymbol("end"), end); | |
+ setattr(n, jit::stringToSymbol("step"), step); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::range(Scalar start, Scalar end, Scalar step) const { | |
profiler::RecordFunction profiler("range"); | |
auto result = as_variable(baseType->range(start, end, step)); | |
return result; | |
} | |
+Tensor & VariableType::arange_out(Tensor & result, Scalar start, Scalar end, Scalar step) const { | |
+ profiler::RecordFunction profiler("arange_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->arange_out(result_, start, end, step); | |
+ if (jit::tracer::isTracing( result )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "arange_out", { result }, {result} ); | |
+ setattr(n, jit::stringToSymbol("start"), start); | |
+ setattr(n, jit::stringToSymbol("end"), end); | |
+ setattr(n, jit::stringToSymbol("step"), step); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::arange(Scalar start, Scalar end, Scalar step) const { | |
profiler::RecordFunction profiler("arange"); | |
auto result = as_variable(baseType->arange(start, end, step)); | |
return result; | |
} | |
+Tensor & VariableType::arange_out(Tensor & result, Scalar end) const { | |
+ profiler::RecordFunction profiler("arange_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->arange_out(result_, end); | |
+ if (jit::tracer::isTracing( result )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "arange_out", { result }, {result} ); | |
+ setattr(n, jit::stringToSymbol("end"), end); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::arange(Scalar end) const { | |
profiler::RecordFunction profiler("arange"); | |
auto result = as_variable(baseType->arange(end)); | |
@@ -1023,6 +1195,30 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::gather_out(Tensor & result, const Tensor & self, int64_t dim, const Tensor & index) const { | |
+ profiler::RecordFunction profiler("gather_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& index_ = unpack_long(index, "index", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<GatherBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("gather_out", { result }); | |
+ grad_fn = std::make_shared<GatherBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ grad_fn->index_ = SavedVariable(index, false); | |
+ } | |
+ baseType->gather_out(result_, self_, dim, index_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, index )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "gather_out", { result, self, index }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::gather(const Tensor & self, int64_t dim, const Tensor & index) const { | |
profiler::RecordFunction profiler("gather"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1055,6 +1251,17 @@ | |
auto result = baseType->equal(self_, other_); | |
return result; | |
} | |
+Tensor & VariableType::__and___out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("__and___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->__and___out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__and___out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::__and__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__and__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1065,6 +1272,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s___and___out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("__and___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s___and___out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__and___out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s___and__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__and__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1079,7 +1298,15 @@ | |
Tensor & VariableType::__iand__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__iand__"); | |
auto& self_ = unpack(self, "self", 0); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __iand__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
baseType->__iand__(self_, other); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__iand_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
@@ -1090,13 +1317,32 @@ | |
profiler::RecordFunction profiler("__iand__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __iand__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ } | |
baseType->s___iand__(self_, other_); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__iand_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
+Tensor & VariableType::__or___out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("__or___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->__or___out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__or___out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::__or__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__or__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1107,6 +1353,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s___or___out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("__or___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s___or___out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__or___out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s___or__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__or__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1121,7 +1379,15 @@ | |
Tensor & VariableType::__ior__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__ior__"); | |
auto& self_ = unpack(self, "self", 0); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __ior__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
baseType->__ior__(self_, other); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ior_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
@@ -1132,13 +1398,32 @@ | |
profiler::RecordFunction profiler("__ior__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __ior__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ } | |
baseType->s___ior__(self_, other_); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ior_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
+Tensor & VariableType::__xor___out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("__xor___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->__xor___out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__xor___out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::__xor__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__xor__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1149,6 +1434,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s___xor___out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("__xor___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s___xor___out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__xor___out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s___xor__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__xor__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1163,7 +1460,15 @@ | |
Tensor & VariableType::__ixor__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__ixor__"); | |
auto& self_ = unpack(self, "self", 0); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __ixor__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
baseType->__ixor__(self_, other); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ixor_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
@@ -1174,13 +1479,32 @@ | |
profiler::RecordFunction profiler("__ixor__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __ixor__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ } | |
baseType->s___ixor__(self_, other_); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ixor_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
+Tensor & VariableType::__lshift___out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("__lshift___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->__lshift___out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__lshift___out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::__lshift__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__lshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1191,6 +1515,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s___lshift___out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("__lshift___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s___lshift___out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__lshift___out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s___lshift__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__lshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1205,7 +1541,15 @@ | |
Tensor & VariableType::__ilshift__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__ilshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __ilshift__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
baseType->__ilshift__(self_, other); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ilshift_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
@@ -1216,13 +1560,32 @@ | |
profiler::RecordFunction profiler("__ilshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __ilshift__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ } | |
baseType->s___ilshift__(self_, other_); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ilshift_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
+Tensor & VariableType::__rshift___out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("__rshift___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->__rshift___out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__rshift___out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::__rshift__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__rshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1233,6 +1596,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s___rshift___out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("__rshift___out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s___rshift___out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "__rshift___out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s___rshift__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__rshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1247,7 +1622,15 @@ | |
Tensor & VariableType::__irshift__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__irshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __irshift__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
baseType->__irshift__(self_, other); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__irshift_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
@@ -1258,13 +1641,32 @@ | |
profiler::RecordFunction profiler("__irshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
+ check_inplace(self); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ grad_fn = std::make_shared<Error>("the derivative for __irshift__ is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ } | |
baseType->s___irshift__(self_, other_); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__irshift_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
+Tensor & VariableType::lt_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("lt_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->lt_out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "lt_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::lt(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("lt"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1275,6 +1677,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_lt_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("lt_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s_lt_out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "lt_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_lt(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("lt"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1326,6 +1740,17 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::gt_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("gt_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->gt_out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "gt_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::gt(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("gt"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1336,6 +1761,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_gt_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("gt_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s_gt_out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "gt_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_gt(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("gt"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1387,6 +1824,17 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::le_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("le_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->le_out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "le_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::le(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("le"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1397,6 +1845,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_le_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("le_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s_le_out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "le_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_le(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("le"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1448,6 +1908,17 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::ge_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("ge_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->ge_out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "ge_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::ge(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("ge"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1458,6 +1929,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_ge_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("ge_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s_ge_out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "ge_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_ge(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("ge"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1509,6 +1992,17 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::eq_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("eq_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->eq_out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "eq_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::eq(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("eq"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1519,6 +2013,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_eq_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("eq_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s_eq_out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "eq_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_eq(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("eq"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1570,6 +2076,17 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::ne_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("ne_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->ne_out(result_, self_, other); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "ne_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::ne(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("ne"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1580,6 +2097,18 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_ne_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("ne_out"); | |
+ auto& result_ = unpack_byte(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ baseType->s_ne_out(result_, self_, other_); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "ne_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_ne(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("ne"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1631,6 +2159,34 @@ | |
} | |
return self; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::min_out(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim) const { | |
+ profiler::RecordFunction profiler("min_out"); | |
+ auto& min_ = unpack(min, "min", 0); | |
+ auto& min_indices_ = unpack_long(min_indices, "min_indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(min); | |
+ std::shared_ptr<MinBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("min_out", { min }); | |
+ grad_fn = std::make_shared<MinBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->min_out(min_, min_indices_, self_, dim, keepdim); | |
+ increment_version(min); | |
+ rebase_history(min, grad_fn); | |
+ if (jit::tracer::isTracing( min, min_indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "min_out", { min, min_indices, self }, {min, min_indices} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->min_indices_ = SavedVariable(min_indices, true); | |
+ } | |
+ return std::forward_as_tuple(min, min_indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::min(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("min"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1655,6 +2211,29 @@ | |
} | |
return std::make_tuple(std::move(min), std::move(min_indices)); | |
} | |
+Tensor & VariableType::s_min_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("min_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<MinBackward2> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ check_output_args("min_out", { result }); | |
+ grad_fn = std::make_shared<MinBackward2>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->other_ = SavedVariable(other, false); | |
+ } | |
+ baseType->s_min_out(result_, self_, other_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "min_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_min(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("min"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1694,6 +2273,34 @@ | |
} | |
return result; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::max_out(Tensor & max, Tensor & max_indices, const Tensor & self, int64_t dim, bool keepdim) const { | |
+ profiler::RecordFunction profiler("max_out"); | |
+ auto& max_ = unpack(max, "max", 0); | |
+ auto& max_indices_ = unpack_long(max_indices, "max_indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(max); | |
+ std::shared_ptr<MaxBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("max_out", { max }); | |
+ grad_fn = std::make_shared<MaxBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->max_out(max_, max_indices_, self_, dim, keepdim); | |
+ increment_version(max); | |
+ rebase_history(max, grad_fn); | |
+ if (jit::tracer::isTracing( max, max_indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_out", { max, max_indices, self }, {max, max_indices} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->max_indices_ = SavedVariable(max_indices, true); | |
+ } | |
+ return std::forward_as_tuple(max, max_indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::max(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("max"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1718,6 +2325,29 @@ | |
} | |
return std::make_tuple(std::move(max), std::move(max_indices)); | |
} | |
+Tensor & VariableType::s_max_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("max_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<MaxBackward2> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ check_output_args("max_out", { result }); | |
+ grad_fn = std::make_shared<MaxBackward2>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->other_ = SavedVariable(other, false); | |
+ } | |
+ baseType->s_max_out(result_, self_, other_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_max(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("max"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1757,6 +2387,35 @@ | |
} | |
return result; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool keepdim) const { | |
+ profiler::RecordFunction profiler("kthvalue_out"); | |
+ auto& values_ = unpack(values, "values", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(values); | |
+ std::shared_ptr<KthvalueBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("kthvalue_out", { values }); | |
+ grad_fn = std::make_shared<KthvalueBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->kthvalue_out(values_, indices_, self_, k, dim, keepdim); | |
+ increment_version(values); | |
+ rebase_history(values, grad_fn); | |
+ if (jit::tracer::isTracing( values, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "kthvalue_out", { values, indices, self }, {values, indices} ); | |
+ setattr(n, jit::stringToSymbol("k"), k); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(values, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("kthvalue"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1782,6 +2441,34 @@ | |
} | |
return std::make_tuple(std::move(values), std::move(indices)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) const { | |
+ profiler::RecordFunction profiler("mode_out"); | |
+ auto& values_ = unpack(values, "values", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(values); | |
+ std::shared_ptr<ModeBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("mode_out", { values }); | |
+ grad_fn = std::make_shared<ModeBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->mode_out(values_, indices_, self_, dim, keepdim); | |
+ increment_version(values); | |
+ rebase_history(values, grad_fn); | |
+ if (jit::tracer::isTracing( values, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "mode_out", { values, indices, self }, {values, indices} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(values, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::mode(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("mode"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1806,6 +2493,34 @@ | |
} | |
return std::make_tuple(std::move(values), std::move(indices)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::median_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) const { | |
+ profiler::RecordFunction profiler("median_out"); | |
+ auto& values_ = unpack(values, "values", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(values); | |
+ std::shared_ptr<MedianBackward1> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("median_out", { values }); | |
+ grad_fn = std::make_shared<MedianBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->median_out(values_, indices_, self_, dim, keepdim); | |
+ increment_version(values); | |
+ rebase_history(values, grad_fn); | |
+ if (jit::tracer::isTracing( values, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "median_out", { values, indices, self }, {values, indices} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(values, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::median(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("median"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1850,6 +2565,33 @@ | |
} | |
return result; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending) const { | |
+ profiler::RecordFunction profiler("sort_out"); | |
+ auto& values_ = unpack(values, "values", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(values); | |
+ std::shared_ptr<SortBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("sort_out", { values }); | |
+ grad_fn = std::make_shared<SortBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ } | |
+ baseType->sort_out(values_, indices_, self_, dim, descending); | |
+ increment_version(values); | |
+ rebase_history(values, grad_fn); | |
+ if (jit::tracer::isTracing( values, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "sort_out", { values, indices, self }, {values, indices} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("descending"), descending); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(values, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::sort(const Tensor & self, int64_t dim, bool descending) const { | |
profiler::RecordFunction profiler("sort"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1873,6 +2615,35 @@ | |
} | |
return std::make_tuple(std::move(values), std::move(indices)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::topk_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const { | |
+ profiler::RecordFunction profiler("topk_out"); | |
+ auto& values_ = unpack(values, "values", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(values); | |
+ std::shared_ptr<TopkBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("topk_out", { values }); | |
+ grad_fn = std::make_shared<TopkBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ } | |
+ baseType->topk_out(values_, indices_, self_, k, dim, largest, sorted); | |
+ increment_version(values); | |
+ rebase_history(values, grad_fn); | |
+ if (jit::tracer::isTracing( values, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "topk_out", { values, indices, self }, {values, indices} ); | |
+ setattr(n, jit::stringToSymbol("k"), k); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("largest"), largest); | |
+ setattr(n, jit::stringToSymbol("sorted"), sorted); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(values, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const { | |
profiler::RecordFunction profiler("topk"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1915,6 +2686,27 @@ | |
auto result = baseType->get_device(self_); | |
return result; | |
} | |
+Tensor & VariableType::abs_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("abs_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<AbsBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("abs_out", { result }); | |
+ grad_fn = std::make_shared<AbsBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->abs_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "abs_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::abs(const Tensor & self) const { | |
profiler::RecordFunction profiler("abs"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -1973,6 +2764,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::sigmoid_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("sigmoid_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<SigmoidBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("sigmoid_out", { result }); | |
+ grad_fn = std::make_shared<SigmoidBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->sigmoid_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "sigmoid_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::sigmoid(const Tensor & self) const { | |
profiler::RecordFunction profiler("sigmoid"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2012,6 +2825,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::log_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("log_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<LogBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("log_out", { result }); | |
+ grad_fn = std::make_shared<LogBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->log_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "log_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::log(const Tensor & self) const { | |
profiler::RecordFunction profiler("log"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2048,6 +2882,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::log1p_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("log1p_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<Log1PBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("log1p_out", { result }); | |
+ grad_fn = std::make_shared<Log1PBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->log1p_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "log1p_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::log1p(const Tensor & self) const { | |
profiler::RecordFunction profiler("log1p"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2065,6 +2920,27 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::lgamma_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("lgamma_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<LgammaBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("lgamma_out", { result }); | |
+ grad_fn = std::make_shared<LgammaBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->lgamma_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "lgamma_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::lgamma(const Tensor & self) const { | |
profiler::RecordFunction profiler("lgamma"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2101,6 +2977,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::digamma_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("digamma_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<DigammaBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("digamma_out", { result }); | |
+ grad_fn = std::make_shared<DigammaBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->digamma_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "digamma_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::digamma(const Tensor & self) const { | |
profiler::RecordFunction profiler("digamma"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2137,6 +3034,28 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::polygamma_out(Tensor & result, int64_t n, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("polygamma_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<PolygammaBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("polygamma_out", { result }); | |
+ grad_fn = std::make_shared<PolygammaBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->n = n; | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->polygamma_out(result_, n, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
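+ // the trace node is named 'node' so it does not shadow the int64_t argument n | |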
+ jit::Node *n = jit::tracer::recordTrace( "polygamma_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("n"), n); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::polygamma(int64_t n, const Tensor & self) const { | |
profiler::RecordFunction profiler("polygamma"); | |
auto& self_ = unpack(self, "self", 1); | |
@@ -2196,6 +3113,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::exp_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("exp_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<ExpBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("exp_out", { result }); | |
+ grad_fn = std::make_shared<ExpBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->exp_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "exp_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::exp(const Tensor & self) const { | |
profiler::RecordFunction profiler("exp"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2238,6 +3176,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::expm1_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("expm1_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<Expm1Backward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("expm1_out", { result }); | |
+ grad_fn = std::make_shared<Expm1Backward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->expm1_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "expm1_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::expm1(const Tensor & self) const { | |
profiler::RecordFunction profiler("expm1"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2277,6 +3237,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::cos_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("cos_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<CosBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("cos_out", { result }); | |
+ grad_fn = std::make_shared<CosBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->cos_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "cos_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::cos(const Tensor & self) const { | |
profiler::RecordFunction profiler("cos"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2313,6 +3294,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::acos_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("acos_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<AcosBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("acos_out", { result }); | |
+ grad_fn = std::make_shared<AcosBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->acos_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "acos_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::acos(const Tensor & self) const { | |
profiler::RecordFunction profiler("acos"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2349,6 +3351,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::cosh_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("cosh_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<CoshBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("cosh_out", { result }); | |
+ grad_fn = std::make_shared<CoshBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->cosh_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "cosh_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::cosh(const Tensor & self) const { | |
profiler::RecordFunction profiler("cosh"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2385,6 +3408,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::sin_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("sin_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<SinBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("sin_out", { result }); | |
+ grad_fn = std::make_shared<SinBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->sin_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "sin_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::sin(const Tensor & self) const { | |
profiler::RecordFunction profiler("sin"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2421,6 +3465,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::asin_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("asin_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<AsinBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("asin_out", { result }); | |
+ grad_fn = std::make_shared<AsinBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->asin_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "asin_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::asin(const Tensor & self) const { | |
profiler::RecordFunction profiler("asin"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2457,6 +3522,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::sinh_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("sinh_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<SinhBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("sinh_out", { result }); | |
+ grad_fn = std::make_shared<SinhBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->sinh_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "sinh_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::sinh(const Tensor & self) const { | |
profiler::RecordFunction profiler("sinh"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2496,6 +3581,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::tan_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("tan_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<TanBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("tan_out", { result }); | |
+ grad_fn = std::make_shared<TanBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->tan_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "tan_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::tan(const Tensor & self) const { | |
profiler::RecordFunction profiler("tan"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2535,6 +3642,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::atan_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("atan_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<AtanBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("atan_out", { result }); | |
+ grad_fn = std::make_shared<AtanBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->atan_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "atan_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::atan(const Tensor & self) const { | |
profiler::RecordFunction profiler("atan"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2574,6 +3701,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::tanh_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("tanh_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<TanhBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("tanh_out", { result }); | |
+ grad_fn = std::make_shared<TanhBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->tanh_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "tanh_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::tanh(const Tensor & self) const { | |
profiler::RecordFunction profiler("tanh"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2613,6 +3762,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::erf_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("erf_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<ErfBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("erf_out", { result }); | |
+ grad_fn = std::make_shared<ErfBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->erf_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "erf_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::erf(const Tensor & self) const { | |
profiler::RecordFunction profiler("erf"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2649,6 +3819,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::erfinv_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("erfinv_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<ErfinvBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("erfinv_out", { result }); | |
+ grad_fn = std::make_shared<ErfinvBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->erfinv_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "erfinv_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::erfinv(const Tensor & self) const { | |
profiler::RecordFunction profiler("erfinv"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2688,6 +3878,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::sqrt_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("sqrt_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<SqrtBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("sqrt_out", { result }); | |
+ grad_fn = std::make_shared<SqrtBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->sqrt_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "sqrt_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::sqrt(const Tensor & self) const { | |
profiler::RecordFunction profiler("sqrt"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2730,6 +3941,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::rsqrt_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("rsqrt_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<RsqrtBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("rsqrt_out", { result }); | |
+ grad_fn = std::make_shared<RsqrtBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->rsqrt_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "rsqrt_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::rsqrt(const Tensor & self) const { | |
profiler::RecordFunction profiler("rsqrt"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2769,6 +4001,26 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::ceil_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("ceil_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<CeilBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("ceil_out", { result }); | |
+ grad_fn = std::make_shared<CeilBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->ceil_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "ceil_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::ceil(const Tensor & self) const { | |
profiler::RecordFunction profiler("ceil"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2805,6 +4055,26 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::floor_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("floor_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<FloorBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("floor_out", { result }); | |
+ grad_fn = std::make_shared<FloorBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->floor_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "floor_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::floor(const Tensor & self) const { | |
profiler::RecordFunction profiler("floor"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2841,6 +4109,26 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::round_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("round_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<RoundBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("round_out", { result }); | |
+ grad_fn = std::make_shared<RoundBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->round_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "round_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::round(const Tensor & self) const { | |
profiler::RecordFunction profiler("round"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2877,6 +4163,26 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::trunc_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("trunc_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<TruncBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("trunc_out", { result }); | |
+ grad_fn = std::make_shared<TruncBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->trunc_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "trunc_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::trunc(const Tensor & self) const { | |
profiler::RecordFunction profiler("trunc"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2913,6 +4217,26 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::frac_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("frac_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<FracBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("frac_out", { result }); | |
+ grad_fn = std::make_shared<FracBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->frac_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "frac_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::frac(const Tensor & self) const { | |
profiler::RecordFunction profiler("frac"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2930,6 +4253,31 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::mean_out(Tensor & result, const Tensor & self, int64_t dim, bool keepdim) const { | |
+ profiler::RecordFunction profiler("mean_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<MeanBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("mean_out", { result }); | |
+ grad_fn = std::make_shared<MeanBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->self_argsize_dim = self.size(dim); | |
+ grad_fn->dim = dim; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->mean_out(result_, self_, dim, keepdim); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "mean_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ return result; | |
+} | |
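+// Reduction-style *_out wrappers (mean_out, var_out, std_out, norm_out, | |
+// sum_out, prod_out, and similar below) additionally stash the metadata | |
+// their backward needs (input sizes, dim, keepdim, and so on) on the | |
+// grad_fn before dispatching. | |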
Tensor VariableType::mean(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("mean"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -2969,6 +4317,32 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::var_out(Tensor & result, const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const { | |
+ profiler::RecordFunction profiler("var_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<VarBackward1> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("var_out", { result }); | |
+ grad_fn = std::make_shared<VarBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ grad_fn->unbiased = unbiased; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->var_out(result_, self_, dim, unbiased, keepdim); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "var_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("unbiased"), unbiased); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::var(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const { | |
profiler::RecordFunction profiler("var"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3009,6 +4383,35 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::std_out(Tensor & result, const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const { | |
+ profiler::RecordFunction profiler("std_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<StdBackward1> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("std_out", { result }); | |
+ grad_fn = std::make_shared<StdBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ grad_fn->unbiased = unbiased; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->std_out(result_, self_, dim, unbiased, keepdim); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "std_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("unbiased"), unbiased); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::std(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const { | |
profiler::RecordFunction profiler("std"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3055,6 +4458,35 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::norm_out(Tensor & result, const Tensor & self, Scalar p, int64_t dim, bool keepdim) const { | |
+ profiler::RecordFunction profiler("norm_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<NormBackward1> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("norm_out", { result }); | |
+ grad_fn = std::make_shared<NormBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->p = p; | |
+ grad_fn->dim = dim; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->norm_out(result_, self_, p, dim, keepdim); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "norm_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("p"), p); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::norm(const Tensor & self, Scalar p, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("norm"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3101,6 +4533,32 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::renorm_out(Tensor & result, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) const { | |
+ profiler::RecordFunction profiler("renorm_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<RenormBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("renorm_out", { result }); | |
+ grad_fn = std::make_shared<RenormBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->p = p; | |
+ grad_fn->dim = dim; | |
+ grad_fn->maxnorm = maxnorm; | |
+ } | |
+ baseType->renorm_out(result_, self_, p, dim, maxnorm); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "renorm_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("p"), p); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("maxnorm"), maxnorm); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) const { | |
profiler::RecordFunction profiler("renorm"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3170,6 +4628,29 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::reciprocal_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("reciprocal_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<ReciprocalBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("reciprocal_out", { result }); | |
+ grad_fn = std::make_shared<ReciprocalBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->reciprocal_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "reciprocal_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::reciprocal(const Tensor & self) const { | |
profiler::RecordFunction profiler("reciprocal"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3212,6 +4691,26 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::neg_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("neg_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<NegBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("neg_out", { result }); | |
+ grad_fn = std::make_shared<NegBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->neg_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "neg_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::neg(const Tensor & self) const { | |
profiler::RecordFunction profiler("neg"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3248,6 +4745,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::s_atan2_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("atan2_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<Atan2Backward> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ check_output_args("atan2_out", { result }); | |
+ grad_fn = std::make_shared<Atan2Backward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->other_ = SavedVariable(other, false); | |
+ } | |
+ baseType->s_atan2_out(result_, self_, other_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "atan2_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
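+// The s_ prefix (s_atan2_out, s_pow_out, s_add_out, s_sub_out, s_mul_out, | |
+// s_lerp_out) presumably marks the overloads that expect already-broadcast, | |
+// same-sized inputs, with the broadcasting front ends generated separately. | |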
Tensor VariableType::s_atan2(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("atan2"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3288,6 +4808,28 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::pow_out(Tensor & result, const Tensor & self, Scalar exponent) const { | |
+ profiler::RecordFunction profiler("pow_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<PowBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("pow_out", { result }); | |
+ grad_fn = std::make_shared<PowBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->exponent = exponent; | |
+ } | |
+ baseType->pow_out(result_, self_, exponent); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "pow_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("exponent"), exponent); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::pow(const Tensor & self, Scalar exponent) const { | |
profiler::RecordFunction profiler("pow"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3306,6 +4848,29 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_pow_out(Tensor & result, const Tensor & self, const Tensor & exponent) const { | |
+ profiler::RecordFunction profiler("pow_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& exponent_ = unpack(exponent, "exponent", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<PowBackward1> grad_fn; | |
+ if (compute_requires_grad({ self, exponent })) { | |
+ check_output_args("pow_out", { result }); | |
+ grad_fn = std::make_shared<PowBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, exponent }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->exponent_ = SavedVariable(exponent, false); | |
+ } | |
+ baseType->s_pow_out(result_, self_, exponent_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, exponent )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "pow_out", { result, self, exponent }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
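+// pow gets two backward nodes: PowBackward0 for a Scalar exponent saves only | |
+// self, while PowBackward1 for a tensor exponent saves both self and the | |
+// exponent as SavedVariables. | |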
Tensor VariableType::s_pow(const Tensor & self, const Tensor & exponent) const { | |
profiler::RecordFunction profiler("pow"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3366,6 +4931,28 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::s_lerp_out(Tensor & result, const Tensor & self, const Tensor & end, Scalar weight) const { | |
+ profiler::RecordFunction profiler("lerp_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& end_ = unpack(end, "end", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<LerpBackward> grad_fn; | |
+ if (compute_requires_grad({ self, end })) { | |
+ check_output_args("lerp_out", { result }); | |
+ grad_fn = std::make_shared<LerpBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, end }); | |
+ grad_fn->weight = weight; | |
+ } | |
+ baseType->s_lerp_out(result_, self_, end_, weight); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, end )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "lerp_out", { result, self, end }, {result} ); | |
+ setattr(n, jit::stringToSymbol("weight"), weight); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_lerp(const Tensor & self, const Tensor & end, Scalar weight) const { | |
profiler::RecordFunction profiler("lerp"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3404,16 +4991,62 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::linspace_out(Tensor & result, Scalar start, Scalar end, int64_t steps) const { | |
+ profiler::RecordFunction profiler("linspace_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->linspace_out(result_, start, end, steps); | |
+ if (jit::tracer::isTracing( result )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "linspace_out", { result }, {result} ); | |
+ setattr(n, jit::stringToSymbol("start"), start); | |
+ setattr(n, jit::stringToSymbol("end"), end); | |
+ setattr(n, jit::stringToSymbol("steps"), steps); | |
+ } | |
+ return result; | |
+} | |
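+// linspace_out (and logspace_out just below) take no differentiable inputs, | |
+// so they skip the grad_fn / version-counter / rebase_history bookkeeping | |
+// entirely and only record a trace node. | |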
Tensor VariableType::linspace(Scalar start, Scalar end, int64_t steps) const { | |
profiler::RecordFunction profiler("linspace"); | |
auto result = as_variable(baseType->linspace(start, end, steps)); | |
return result; | |
} | |
+Tensor & VariableType::logspace_out(Tensor & result, Scalar start, Scalar end, int64_t steps) const { | |
+ profiler::RecordFunction profiler("logspace_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->logspace_out(result_, start, end, steps); | |
+ if (jit::tracer::isTracing( result )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "logspace_out", { result }, {result} ); | |
+ setattr(n, jit::stringToSymbol("start"), start); | |
+ setattr(n, jit::stringToSymbol("end"), end); | |
+ setattr(n, jit::stringToSymbol("steps"), steps); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::logspace(Scalar start, Scalar end, int64_t steps) const { | |
profiler::RecordFunction profiler("logspace"); | |
auto result = as_variable(baseType->logspace(start, end, steps)); | |
return result; | |
} | |
+Tensor & VariableType::histc_out(Tensor & result, const Tensor & self, int64_t bins, Scalar min, Scalar max) const { | |
+ profiler::RecordFunction profiler("histc_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<HistcBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("histc_out", { result }); | |
+ grad_fn = std::make_shared<HistcBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->histc_out(result_, self_, bins, min, max); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "histc_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("bins"), bins); | |
+ setattr(n, jit::stringToSymbol("min"), min); | |
+ setattr(n, jit::stringToSymbol("max"), max); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const { | |
profiler::RecordFunction profiler("histc"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3452,6 +5083,30 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::sum_out(Tensor & result, const Tensor & self, int64_t dim, bool keepdim) const { | |
+ profiler::RecordFunction profiler("sum_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<SumBackward1> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("sum_out", { result }); | |
+ grad_fn = std::make_shared<SumBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->dim = dim; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->sum_out(result_, self_, dim, keepdim); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "sum_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::sum(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("sum"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3489,6 +5144,33 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::prod_out(Tensor & result, const Tensor & self, int64_t dim, bool keepdim) const { | |
+ profiler::RecordFunction profiler("prod_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<ProdBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("prod_out", { result }); | |
+ grad_fn = std::make_shared<ProdBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ grad_fn->keepdim = keepdim; | |
+ } | |
+ baseType->prod_out(result_, self_, dim, keepdim); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "prod_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->result_ = SavedVariable(result, true); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::prod(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("prod"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3532,6 +5214,27 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::cumsum_out(Tensor & result, const Tensor & self, int64_t dim) const { | |
+ profiler::RecordFunction profiler("cumsum_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<CumsumBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("cumsum_out", { result }); | |
+ grad_fn = std::make_shared<CumsumBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->dim = dim; | |
+ } | |
+ baseType->cumsum_out(result_, self_, dim); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "cumsum_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::cumsum(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("cumsum"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3549,6 +5252,28 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::cumprod_out(Tensor & result, const Tensor & self, int64_t dim) const { | |
+ profiler::RecordFunction profiler("cumprod_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<CumprodBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("cumprod_out", { result }); | |
+ grad_fn = std::make_shared<CumprodBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ } | |
+ baseType->cumprod_out(result_, self_, dim); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "cumprod_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::cumprod(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("cumprod"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3567,6 +5292,26 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::sign_out(Tensor & result, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("sign_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<SignBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("sign_out", { result }); | |
+ grad_fn = std::make_shared<SignBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->sign_out(result_, self_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "sign_out", { result, self }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::sign(const Tensor & self) const { | |
profiler::RecordFunction profiler("sign"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3620,6 +5363,27 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::add_out(Tensor & result, const Tensor & self, Scalar other, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("add_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<AddBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("add_out", { result }); | |
+ grad_fn = std::make_shared<AddBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->add_out(result_, self_, other, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "add_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::add(const Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3638,6 +5401,28 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_add_out(Tensor & result, const Tensor & self, const Tensor & other, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("add_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<AddBackward1> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ check_output_args("add_out", { result }); | |
+ grad_fn = std::make_shared<AddBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ grad_fn->alpha = alpha; | |
+ } | |
+ baseType->s_add_out(result_, self_, other_, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "add_out", { result, self, other }, {result} ); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_add(const Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3656,15 +5441,36 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::add_out(Tensor & result, const Tensor & self, SparseTensor other, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("add_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self, other.tref })) { | |
+ check_output_args("add_out", { result }); | |
+ grad_fn = std::make_shared<Error>("the derivative for add_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self, other.tref }); | |
+ } | |
+ baseType->add_out(result_, self_, other_, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "add_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
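+// For the sparse overload there is no derivative formula, so an Error node | |
+// carrying "the derivative for add_out is not implemented" is installed as | |
+// the grad_fn and presumably raises if backward ever reaches it. Note that | |
+// requires-grad and next_functions are computed from other.tref, the Tensor | |
+// wrapped by SparseTensor. | |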
Tensor VariableType::add(const Tensor & self, SparseTensor other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add"); | |
auto& self_ = unpack(self, "self", 0); | |
auto other_ = unpack(other, "other", 1); | |
std::shared_ptr<Error> grad_fn; | |
- if (compute_requires_grad({ self })) { | |
+ if (compute_requires_grad({ self, other.tref })) { | |
grad_fn = std::make_shared<Error>("the derivative for add is not implemented"); | |
- grad_fn->next_functions = compute_next_functions({ self }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ self, other.tref }); | |
} | |
auto result = as_variable(baseType->add(self_, other_, alpha)); | |
set_history(result, grad_fn); | |
@@ -3721,10 +5526,9 @@ | |
auto other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
- if (compute_requires_grad({ self })) { | |
+ if (compute_requires_grad({ self, other.tref })) { | |
grad_fn = std::make_shared<Error>("the derivative for add_ is not implemented"); | |
- grad_fn->next_functions = compute_next_functions({ self }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ self, other.tref }); | |
} | |
baseType->add_(self_, other_, alpha); | |
increment_version(self); | |
@@ -3736,6 +5540,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::sub_out(Tensor & result, const Tensor & self, Scalar other, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("sub_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<SubBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("sub_out", { result }); | |
+ grad_fn = std::make_shared<SubBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->sub_out(result_, self_, other, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "sub_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::sub(const Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3754,6 +5578,28 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_sub_out(Tensor & result, const Tensor & self, const Tensor & other, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("sub_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<SubBackward1> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ check_output_args("sub_out", { result }); | |
+ grad_fn = std::make_shared<SubBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ grad_fn->alpha = alpha; | |
+ } | |
+ baseType->s_sub_out(result_, self_, other_, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "sub_out", { result, self, other }, {result} ); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_sub(const Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3812,6 +5657,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::mul_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("mul_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<MulBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("mul_out", { result }); | |
+ grad_fn = std::make_shared<MulBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->other = other; | |
+ } | |
+ baseType->mul_out(result_, self_, other); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "mul_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::mul(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("mul"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3829,6 +5695,29 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_mul_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("mul_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<MulBackward1> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ check_output_args("mul_out", { result }); | |
+ grad_fn = std::make_shared<MulBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
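+ // Both operands must be saved: d(self*other)/dself = other and | |
+ // d(self*other)/dother = self. | |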
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->other_ = SavedVariable(other, false); | |
+ } | |
+ baseType->s_mul_out(result_, self_, other_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "mul_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_mul(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("mul"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3888,6 +5777,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::div_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("div_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<DivBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("div_out", { result }); | |
+ grad_fn = std::make_shared<DivBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->other = other; | |
+ } | |
+ baseType->div_out(result_, self_, other); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "div_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::div(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("div"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3905,6 +5815,29 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_div_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("div_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<DivBackward1> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ check_output_args("div_out", { result }); | |
+ grad_fn = std::make_shared<DivBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->other_ = SavedVariable(other, false); | |
+ } | |
+ baseType->s_div_out(result_, self_, other_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "div_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_div(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("div"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3964,6 +5897,26 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::fmod_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("fmod_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<FmodBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("fmod_out", { result }); | |
+ grad_fn = std::make_shared<FmodBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->fmod_out(result_, self_, other); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "fmod_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::fmod(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("fmod"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -3981,6 +5933,28 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_fmod_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("fmod_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<FmodBackward1> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ check_output_args("fmod_out", { result }); | |
+ grad_fn = std::make_shared<FmodBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ grad_fn->other_ = SavedVariable(other, false); | |
+ } | |
+ baseType->s_fmod_out(result_, self_, other_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "fmod_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_fmod(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("fmod"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4038,6 +6011,26 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::remainder_out(Tensor & result, const Tensor & self, Scalar other) const { | |
+ profiler::RecordFunction profiler("remainder_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<RemainderBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("remainder_out", { result }); | |
+ grad_fn = std::make_shared<RemainderBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->remainder_out(result_, self_, other); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "remainder_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("other"), other); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::remainder(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("remainder"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4055,6 +6047,28 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_remainder_out(Tensor & result, const Tensor & self, const Tensor & other) const { | |
+ profiler::RecordFunction profiler("remainder_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
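+ // remainder only differentiates with respect to self, so other must not | |
+ // require grad. | |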
+ check_no_requires_grad(other, "other"); | |
+ std::shared_ptr<RemainderBackward1> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("remainder_out", { result }); | |
+ grad_fn = std::make_shared<RemainderBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->s_remainder_out(result_, self_, other_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "remainder_out", { result, self, other }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_remainder(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("remainder"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4114,6 +6125,30 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::clamp_out(Tensor & result, const Tensor & self, Scalar min, Scalar max) const { | |
+ profiler::RecordFunction profiler("clamp_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<ClampBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("clamp_out", { result }); | |
+ grad_fn = std::make_shared<ClampBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->min = min; | |
+ grad_fn->max = max; | |
+ } | |
+ baseType->clamp_out(result_, self_, min, max); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "clamp_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("min"), min); | |
+ setattr(n, jit::stringToSymbol("max"), max); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::clamp(const Tensor & self, Scalar min, Scalar max) const { | |
profiler::RecordFunction profiler("clamp"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4156,6 +6191,28 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::clamp_min_out(Tensor & result, const Tensor & self, Scalar min) const { | |
+ profiler::RecordFunction profiler("clamp_min_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<ClampMinBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("clamp_min_out", { result }); | |
+ grad_fn = std::make_shared<ClampMinBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->min = min; | |
+ } | |
+ baseType->clamp_min_out(result_, self_, min); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "clamp_min_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("min"), min); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::clamp_min(const Tensor & self, Scalar min) const { | |
profiler::RecordFunction profiler("clamp_min"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4194,6 +6251,28 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::clamp_max_out(Tensor & result, const Tensor & self, Scalar max) const { | |
+ profiler::RecordFunction profiler("clamp_max_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<ClampMaxBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("clamp_max_out", { result }); | |
+ grad_fn = std::make_shared<ClampMaxBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->max = max; | |
+ } | |
+ baseType->clamp_max_out(result_, self_, max); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "clamp_max_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("max"), max); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::clamp_max(const Tensor & self, Scalar max) const { | |
profiler::RecordFunction profiler("clamp_max"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4251,6 +6330,27 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::tril_out(Tensor & result, const Tensor & self, int64_t diagonal) const { | |
+ profiler::RecordFunction profiler("tril_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<TrilBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("tril_out", { result }); | |
+ grad_fn = std::make_shared<TrilBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->diagonal = diagonal; | |
+ } | |
+ baseType->tril_out(result_, self_, diagonal); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "tril_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::tril(const Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("tril"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4287,6 +6387,27 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::triu_out(Tensor & result, const Tensor & self, int64_t diagonal) const { | |
+ profiler::RecordFunction profiler("triu_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<TriuBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("triu_out", { result }); | |
+ grad_fn = std::make_shared<TriuBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->diagonal = diagonal; | |
+ } | |
+ baseType->triu_out(result_, self_, diagonal); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "triu_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::triu(const Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("triu"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4323,6 +6444,30 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::cross_out(Tensor & result, const Tensor & self, const Tensor & other, int64_t dim) const { | |
+ profiler::RecordFunction profiler("cross_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& other_ = unpack(other, "other", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<CrossBackward> grad_fn; | |
+ if (compute_requires_grad({ self, other })) { | |
+ check_output_args("cross_out", { result }); | |
+ grad_fn = std::make_shared<CrossBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ grad_fn->other_ = SavedVariable(other, false); | |
+ } | |
+ baseType->cross_out(result_, self_, other_, dim); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, other )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "cross_out", { result, self, other }, {result} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::cross(const Tensor & self, const Tensor & other, int64_t dim) const { | |
profiler::RecordFunction profiler("cross"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4343,11 +6488,43 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::eye_out(Tensor & result, int64_t n, int64_t m) const { | |
+ profiler::RecordFunction profiler("eye_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
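+ // eye_out has no differentiable tensor inputs, so this wrapper creates no | |
+ // grad_fn and only records the trace. | |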
+ baseType->eye_out(result_, n, m); | |
+ if (jit::tracer::isTracing( result )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "eye_out", { result }, {result} ); | |
+ setattr(n, jit::stringToSymbol("n"), n); | |
+ setattr(n, jit::stringToSymbol("m"), m); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::eye(int64_t n, int64_t m) const { | |
profiler::RecordFunction profiler("eye"); | |
auto result = as_variable(baseType->eye(n, m)); | |
return result; | |
} | |
+Tensor & VariableType::diag_out(Tensor & result, const Tensor & self, int64_t diagonal) const { | |
+ profiler::RecordFunction profiler("diag_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<DiagBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("diag_out", { result }); | |
+ grad_fn = std::make_shared<DiagBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->diagonal = diagonal; | |
+ } | |
+ baseType->diag_out(result_, self_, diagonal); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "diag_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::diag(const Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("diag"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4365,6 +6542,35 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_addmm_out(Tensor & result, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("addmm_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& mat1_ = unpack(mat1, "mat1", 2); | |
+ auto& mat2_ = unpack(mat2, "mat2", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<AddmmBackward> grad_fn; | |
+ if (compute_requires_grad({ self, mat1, mat2 })) { | |
+ check_output_args("addmm_out", { result }); | |
+ grad_fn = std::make_shared<AddmmBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, mat1, mat2 }); | |
+ grad_fn->mat1_sizes = mat1.sizes(); | |
+ grad_fn->mat1_ = SavedVariable(mat1, false); | |
+ grad_fn->mat2_ = SavedVariable(mat2, false); | |
+ grad_fn->alpha = alpha; | |
+ grad_fn->mat2_sizes = mat2.sizes(); | |
+ grad_fn->beta = beta; | |
+ } | |
+ baseType->s_addmm_out(result_, self_, mat1_, mat2_, beta, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, mat1, mat2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "addmm_out", { result, self, mat1, mat2 }, {result} ); | |
+ setattr(n, jit::stringToSymbol("beta"), beta); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmm"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4417,6 +6623,33 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::s_addmv_out(Tensor & result, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("addmv_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& mat_ = unpack(mat, "mat", 2); | |
+ auto& vec_ = unpack(vec, "vec", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<AddmvBackward> grad_fn; | |
+ if (compute_requires_grad({ self, mat, vec })) { | |
+ check_output_args("addmv_out", { result }); | |
+ grad_fn = std::make_shared<AddmvBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, mat, vec }); | |
+ grad_fn->vec_ = SavedVariable(vec, false); | |
+ grad_fn->alpha = alpha; | |
+ grad_fn->beta = beta; | |
+ grad_fn->mat_ = SavedVariable(mat, false); | |
+ } | |
+ baseType->s_addmv_out(result_, self_, mat_, vec_, beta, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, mat, vec )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "addmv_out", { result, self, mat, vec }, {result} ); | |
+ setattr(n, jit::stringToSymbol("beta"), beta); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmv"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4465,6 +6698,33 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::s_addr_out(Tensor & result, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("addr_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& vec1_ = unpack(vec1, "vec1", 2); | |
+ auto& vec2_ = unpack(vec2, "vec2", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<AddrBackward> grad_fn; | |
+ if (compute_requires_grad({ self, vec1, vec2 })) { | |
+ check_output_args("addr_out", { result }); | |
+ grad_fn = std::make_shared<AddrBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, vec1, vec2 }); | |
+ grad_fn->beta = beta; | |
+ grad_fn->vec2_ = SavedVariable(vec2, false); | |
+ grad_fn->alpha = alpha; | |
+ grad_fn->vec1_ = SavedVariable(vec1, false); | |
+ } | |
+ baseType->s_addr_out(result_, self_, vec1_, vec2_, beta, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, vec1, vec2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "addr_out", { result, self, vec1, vec2 }, {result} ); | |
+ setattr(n, jit::stringToSymbol("beta"), beta); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addr"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4513,6 +6773,29 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::ger_out(Tensor & result, const Tensor & self, const Tensor & vec2) const { | |
+ profiler::RecordFunction profiler("ger_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& vec2_ = unpack(vec2, "vec2", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<GerBackward> grad_fn; | |
+ if (compute_requires_grad({ self, vec2 })) { | |
+ check_output_args("ger_out", { result }); | |
+ grad_fn = std::make_shared<GerBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, vec2 }); | |
+ grad_fn->vec2_ = SavedVariable(vec2, false); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->ger_out(result_, self_, vec2_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, vec2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "ger_out", { result, self, vec2 }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::ger(const Tensor & self, const Tensor & vec2) const { | |
profiler::RecordFunction profiler("ger"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4532,6 +6815,29 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::mv_out(Tensor & result, const Tensor & self, const Tensor & vec) const { | |
+ profiler::RecordFunction profiler("mv_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& vec_ = unpack(vec, "vec", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<MvBackward> grad_fn; | |
+ if (compute_requires_grad({ self, vec })) { | |
+ check_output_args("mv_out", { result }); | |
+ grad_fn = std::make_shared<MvBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, vec }); | |
+ grad_fn->vec_ = SavedVariable(vec, false); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->mv_out(result_, self_, vec_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, vec )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "mv_out", { result, self, vec }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::mv(const Tensor & self, const Tensor & vec) const { | |
profiler::RecordFunction profiler("mv"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4551,6 +6857,31 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::mm_out(Tensor & result, const Tensor & self, const Tensor & mat2) const { | |
+ profiler::RecordFunction profiler("mm_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& mat2_ = unpack(mat2, "mat2", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<MmBackward> grad_fn; | |
+ if (compute_requires_grad({ self, mat2 })) { | |
+ check_output_args("mm_out", { result }); | |
+ grad_fn = std::make_shared<MmBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, mat2 }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->mat2_sizes = mat2.sizes(); | |
+ grad_fn->mat2_ = SavedVariable(mat2, false); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ } | |
+ baseType->mm_out(result_, self_, mat2_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, mat2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "mm_out", { result, self, mat2 }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::mm(const Tensor & self, const Tensor & mat2) const { | |
profiler::RecordFunction profiler("mm"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4572,6 +6903,29 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::bmm_out(Tensor & result, const Tensor & self, const Tensor & mat2) const { | |
+ profiler::RecordFunction profiler("bmm_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& mat2_ = unpack(mat2, "mat2", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<BmmBackward> grad_fn; | |
+ if (compute_requires_grad({ self, mat2 })) { | |
+ check_output_args("bmm_out", { result }); | |
+ grad_fn = std::make_shared<BmmBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, mat2 }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->mat2_ = SavedVariable(mat2, false); | |
+ } | |
+ baseType->bmm_out(result_, self_, mat2_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, mat2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "bmm_out", { result, self, mat2 }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::bmm(const Tensor & self, const Tensor & mat2) const { | |
profiler::RecordFunction profiler("bmm"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4591,6 +6945,36 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::s_addbmm_out(Tensor & result, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("addbmm_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& batch1_ = unpack(batch1, "batch1", 2); | |
+ auto& batch2_ = unpack(batch2, "batch2", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<AddbmmBackward> grad_fn; | |
+ if (compute_requires_grad({ self, batch1, batch2 })) { | |
+ check_output_args("addbmm_out", { result }); | |
+ grad_fn = std::make_shared<AddbmmBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
+ grad_fn->batch1_argsize_0 = batch1.size(0); | |
+ grad_fn->batch1_argsize_1 = batch1.size(1); | |
+ grad_fn->batch2_argsize_2 = batch2.size(2); | |
+ grad_fn->batch2_ = SavedVariable(batch2, false); | |
+ grad_fn->alpha = alpha; | |
+ grad_fn->batch1_ = SavedVariable(batch1, false); | |
+ grad_fn->beta = beta; | |
+ } | |
+ baseType->s_addbmm_out(result_, self_, batch1_, batch2_, beta, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, batch1, batch2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "addbmm_out", { result, self, batch1, batch2 }, {result} ); | |
+ setattr(n, jit::stringToSymbol("beta"), beta); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addbmm"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4645,6 +7029,33 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::s_baddbmm_out(Tensor & result, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
+ profiler::RecordFunction profiler("baddbmm_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& batch1_ = unpack(batch1, "batch1", 2); | |
+ auto& batch2_ = unpack(batch2, "batch2", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<BaddbmmBackward> grad_fn; | |
+ if (compute_requires_grad({ self, batch1, batch2 })) { | |
+ check_output_args("baddbmm_out", { result }); | |
+ grad_fn = std::make_shared<BaddbmmBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
+ grad_fn->batch2_ = SavedVariable(batch2, false); | |
+ grad_fn->alpha = alpha; | |
+ grad_fn->batch1_ = SavedVariable(batch1, false); | |
+ grad_fn->beta = beta; | |
+ } | |
+ baseType->s_baddbmm_out(result_, self_, batch1_, batch2_, beta, alpha); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, batch1, batch2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "baddbmm_out", { result, self, batch1, batch2 }, {result} ); | |
+ setattr(n, jit::stringToSymbol("beta"), beta); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("baddbmm"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4693,6 +7104,31 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::s_addcmul_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
+ profiler::RecordFunction profiler("addcmul_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& tensor1_ = unpack(tensor1, "tensor1", 2); | |
+ auto& tensor2_ = unpack(tensor2, "tensor2", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<AddcmulBackward> grad_fn; | |
+ if (compute_requires_grad({ self, tensor1, tensor2 })) { | |
+ check_output_args("addcmul_out", { result }); | |
+ grad_fn = std::make_shared<AddcmulBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
+ grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
+ grad_fn->value = value; | |
+ grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
+ } | |
+ baseType->s_addcmul_out(result_, self_, tensor1_, tensor2_, value); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, tensor1, tensor2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "addcmul_out", { result, self, tensor1, tensor2 }, {result} ); | |
+ setattr(n, jit::stringToSymbol("value"), value); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcmul"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4737,6 +7173,31 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::s_addcdiv_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
+ profiler::RecordFunction profiler("addcdiv_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& tensor1_ = unpack(tensor1, "tensor1", 2); | |
+ auto& tensor2_ = unpack(tensor2, "tensor2", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<AddcdivBackward> grad_fn; | |
+ if (compute_requires_grad({ self, tensor1, tensor2 })) { | |
+ check_output_args("addcdiv_out", { result }); | |
+ grad_fn = std::make_shared<AddcdivBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
+ grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
+ grad_fn->value = value; | |
+ grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
+ } | |
+ baseType->s_addcdiv_out(result_, self_, tensor1_, tensor2_, value); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, tensor1, tensor2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "addcdiv_out", { result, self, tensor1, tensor2 }, {result} ); | |
+ setattr(n, jit::stringToSymbol("value"), value); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::s_addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcdiv"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4781,6 +7242,32 @@ | |
} | |
return self; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::gesv_out(Tensor & solution, Tensor & lu, const Tensor & self, const Tensor & A) const { | |
+ profiler::RecordFunction profiler("gesv_out"); | |
+ auto& solution_ = unpack(solution, "solution", 0); | |
+ auto& lu_ = unpack(lu, "lu", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& A_ = unpack(A, "A", 3); | |
+ check_inplace(solution); | |
+ std::shared_ptr<GesvBackward> grad_fn; | |
+ if (compute_requires_grad({ self, A })) { | |
+ check_output_args("gesv_out", { solution }); | |
+ grad_fn = std::make_shared<GesvBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, A }); | |
+ grad_fn->A_ = SavedVariable(A, false); | |
+ } | |
+ baseType->gesv_out(solution_, lu_, self_, A_); | |
+ increment_version(solution); | |
+ rebase_history(solution, grad_fn); | |
+ if (jit::tracer::isTracing( solution, lu, self, A )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "gesv_out", { solution, lu, self, A }, {solution, lu} ); | |
+ (void)n; | |
+ } | |
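+ // The solution tensor is captured as a SavedVariable only after the base | |
+ // call has produced it, since it is an output of the function rather than | |
+ // an input. | |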
+ if (grad_fn) { | |
+ grad_fn->solution_ = SavedVariable(solution, true); | |
+ } | |
+ return std::forward_as_tuple(solution, lu); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::gesv(const Tensor & self, const Tensor & A) const { | |
profiler::RecordFunction profiler("gesv"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4803,6 +7290,30 @@ | |
} | |
return std::make_tuple(std::move(solution), std::move(lu)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::gels_out(Tensor & res1, Tensor & res2, const Tensor & self, const Tensor & A) const { | |
+ profiler::RecordFunction profiler("gels_out"); | |
+ auto& res1_ = unpack(res1, "res1", 0); | |
+ auto& res2_ = unpack(res2, "res2", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& A_ = unpack(A, "A", 3); | |
+ check_inplace(res1); | |
+ check_inplace(res2); | |
+ std::shared_ptr<GelsBackward> grad_fn; | |
+ if (compute_requires_grad({ self, A })) { | |
+ check_output_args("gels_out", { res1, res2 }); | |
+ grad_fn = std::make_shared<GelsBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, A }); | |
+ } | |
+ baseType->gels_out(res1_, res2_, self_, A_); | |
+ increment_version(res1); | |
+ increment_version(res2); | |
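+ // Both outputs share the same GelsBackward node; their histories are | |
+ // rebased together. | |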
+ rebase_history({ res1, res2 }, grad_fn); | |
+ if (jit::tracer::isTracing( res1, res2, self, A )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "gels_out", { res1, res2, self, A }, {res1, res2} ); | |
+ (void)n; | |
+ } | |
+ return std::forward_as_tuple(res1, res2); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::gels(const Tensor & self, const Tensor & A) const { | |
profiler::RecordFunction profiler("gels"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4822,6 +7332,40 @@ | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::trtrs_out(Tensor & res1, Tensor & res2, const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) const { | |
+ profiler::RecordFunction profiler("trtrs_out"); | |
+ auto& res1_ = unpack(res1, "res1", 0); | |
+ auto& res2_ = unpack(res2, "res2", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& A_ = unpack(A, "A", 3); | |
+ check_inplace(res1); | |
+ check_inplace(res2); | |
+ std::shared_ptr<TrtrsBackward> grad_fn; | |
+ if (compute_requires_grad({ self, A })) { | |
+ check_output_args("trtrs_out", { res1, res2 }); | |
+ grad_fn = std::make_shared<TrtrsBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, A }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->A_ = SavedVariable(A, false); | |
+ grad_fn->upper = upper; | |
+ grad_fn->transpose = transpose; | |
+ grad_fn->unitriangular = unitriangular; | |
+ } | |
+ baseType->trtrs_out(res1_, res2_, self_, A_, upper, transpose, unitriangular); | |
+ increment_version(res1); | |
+ increment_version(res2); | |
+ rebase_history({ res1, res2 }, grad_fn); | |
+ if (jit::tracer::isTracing( res1, res2, self, A )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "trtrs_out", { res1, res2, self, A }, {res1, res2} ); | |
+ setattr(n, jit::stringToSymbol("upper"), upper); | |
+ setattr(n, jit::stringToSymbol("transpose"), transpose); | |
+ setattr(n, jit::stringToSymbol("unitriangular"), unitriangular); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->res1_ = SavedVariable(res1, true); | |
+ } | |
+ return std::forward_as_tuple(res1, res2); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) const { | |
profiler::RecordFunction profiler("trtrs"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4850,6 +7394,30 @@ | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::symeig_out(Tensor & res1, Tensor & res2, const Tensor & self, bool eigenvectors, bool upper) const { | |
+ profiler::RecordFunction profiler("symeig_out"); | |
+ auto& res1_ = unpack(res1, "res1", 0); | |
+ auto& res2_ = unpack(res2, "res2", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(res1); | |
+ check_inplace(res2); | |
+ std::shared_ptr<SymeigBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("symeig_out", { res1, res2 }); | |
+ grad_fn = std::make_shared<SymeigBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->symeig_out(res1_, res2_, self_, eigenvectors, upper); | |
+ increment_version(res1); | |
+ increment_version(res2); | |
+ rebase_history({ res1, res2 }, grad_fn); | |
+ if (jit::tracer::isTracing( res1, res2, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "symeig_out", { res1, res2, self }, {res1, res2} ); | |
+ setattr(n, jit::stringToSymbol("eigenvectors"), eigenvectors); | |
+ setattr(n, jit::stringToSymbol("upper"), upper); | |
+ } | |
+ return std::forward_as_tuple(res1, res2); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::symeig(const Tensor & self, bool eigenvectors, bool upper) const { | |
profiler::RecordFunction profiler("symeig"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4869,6 +7436,29 @@ | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::eig_out(Tensor & res1, Tensor & res2, const Tensor & self, bool eigenvectors) const { | |
+ profiler::RecordFunction profiler("eig_out"); | |
+ auto& res1_ = unpack(res1, "res1", 0); | |
+ auto& res2_ = unpack(res2, "res2", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(res1); | |
+ check_inplace(res2); | |
+ std::shared_ptr<EigBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("eig_out", { res1, res2 }); | |
+ grad_fn = std::make_shared<EigBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->eig_out(res1_, res2_, self_, eigenvectors); | |
+ increment_version(res1); | |
+ increment_version(res2); | |
+ rebase_history({ res1, res2 }, grad_fn); | |
+ if (jit::tracer::isTracing( res1, res2, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "eig_out", { res1, res2, self }, {res1, res2} ); | |
+ setattr(n, jit::stringToSymbol("eigenvectors"), eigenvectors); | |
+ } | |
+ return std::forward_as_tuple(res1, res2); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::eig(const Tensor & self, bool eigenvectors) const { | |
profiler::RecordFunction profiler("eig"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4887,6 +7476,39 @@ | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::svd_out(Tensor & res1, Tensor & res2, Tensor & res3, const Tensor & self, bool some) const { | |
+ profiler::RecordFunction profiler("svd_out"); | |
+ auto& res1_ = unpack(res1, "res1", 0); | |
+ auto& res2_ = unpack(res2, "res2", 1); | |
+ auto& res3_ = unpack(res3, "res3", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ check_inplace(res1); | |
+ check_inplace(res2); | |
+ check_inplace(res3); | |
+ std::shared_ptr<SvdBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("svd_out", { res1, res2, res3 }); | |
+ grad_fn = std::make_shared<SvdBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->some = some; | |
+ } | |
+ baseType->svd_out(res1_, res2_, res3_, self_, some); | |
+ increment_version(res1); | |
+ increment_version(res2); | |
+ increment_version(res3); | |
+ rebase_history({ res1, res2, res3 }, grad_fn); | |
+ if (jit::tracer::isTracing( res1, res2, res3, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "svd_out", { res1, res2, res3, self }, {res1, res2, res3} ); | |
+ setattr(n, jit::stringToSymbol("some"), some); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->res1_ = SavedVariable(res1, true); | |
+ grad_fn->res2_ = SavedVariable(res2, true); | |
+ grad_fn->res3_ = SavedVariable(res3, true); | |
+ } | |
+ return std::forward_as_tuple(res1, res2, res3); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::svd(const Tensor & self, bool some) const { | |
profiler::RecordFunction profiler("svd"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4911,6 +7533,29 @@ | |
} | |
return std::make_tuple(std::move(res1), std::move(res2), std::move(res3)); | |
} | |
+Tensor & VariableType::inverse_out(Tensor & output, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("inverse_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<InverseBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("inverse_out", { output }); | |
+ grad_fn = std::make_shared<InverseBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->inverse_out(output_, self_); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "inverse_out", { output, self }, {output} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->output_ = SavedVariable(output, true); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::inverse(const Tensor & self) const { | |
profiler::RecordFunction profiler("inverse"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4931,6 +7575,30 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::potrf_out(Tensor & output, const Tensor & self, bool upper) const { | |
+ profiler::RecordFunction profiler("potrf_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<PotrfBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("potrf_out", { output }); | |
+ grad_fn = std::make_shared<PotrfBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->upper = upper; | |
+ } | |
+ baseType->potrf_out(output_, self_, upper); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "potrf_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("upper"), upper); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->output_ = SavedVariable(output, true); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::potrf(const Tensor & self, bool upper) const { | |
profiler::RecordFunction profiler("potrf"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4951,6 +7619,27 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::potrs_out(Tensor & result, const Tensor & self, const Tensor & input2, bool upper) const { | |
+ profiler::RecordFunction profiler("potrs_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& input2_ = unpack(input2, "input2", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<PotrsBackward> grad_fn; | |
+ if (compute_requires_grad({ self, input2 })) { | |
+ check_output_args("potrs_out", { result }); | |
+ grad_fn = std::make_shared<PotrsBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, input2 }); | |
+ } | |
+ baseType->potrs_out(result_, self_, input2_, upper); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, input2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "potrs_out", { result, self, input2 }, {result} ); | |
+ setattr(n, jit::stringToSymbol("upper"), upper); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::potrs(const Tensor & self, const Tensor & input2, bool upper) const { | |
profiler::RecordFunction profiler("potrs"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4969,6 +7657,26 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::potri_out(Tensor & output, const Tensor & self, bool upper) const { | |
+ profiler::RecordFunction profiler("potri_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<PotriBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("potri_out", { output }); | |
+ grad_fn = std::make_shared<PotriBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->potri_out(output_, self_, upper); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "potri_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("upper"), upper); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::potri(const Tensor & self, bool upper) const { | |
profiler::RecordFunction profiler("potri"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -4986,6 +7693,30 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::pstrf_out(Tensor & res1, Tensor & res2, const Tensor & self, bool upper, Scalar tol) const { | |
+ profiler::RecordFunction profiler("pstrf_out"); | |
+ auto& res1_ = unpack(res1, "res1", 0); | |
+ auto& res2_ = unpack(res2, "res2", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(res1); | |
+ check_inplace(res2); | |
+ std::shared_ptr<PstrfBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("pstrf_out", { res1, res2 }); | |
+ grad_fn = std::make_shared<PstrfBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->pstrf_out(res1_, res2_, self_, upper, tol); | |
+ increment_version(res1); | |
+ increment_version(res2); | |
+ rebase_history({ res1, res2 }, grad_fn); | |
+ if (jit::tracer::isTracing( res1, res2, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "pstrf_out", { res1, res2, self }, {res1, res2} ); | |
+ setattr(n, jit::stringToSymbol("upper"), upper); | |
+ setattr(n, jit::stringToSymbol("tol"), tol); | |
+ } | |
+ return std::forward_as_tuple(res1, res2); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::pstrf(const Tensor & self, bool upper, Scalar tol) const { | |
profiler::RecordFunction profiler("pstrf"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5005,6 +7735,29 @@ | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::qr_out(Tensor & res1, Tensor & res2, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("qr_out"); | |
+ auto& res1_ = unpack(res1, "res1", 0); | |
+ auto& res2_ = unpack(res2, "res2", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(res1); | |
+ check_inplace(res2); | |
+ std::shared_ptr<QrBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("qr_out", { res1, res2 }); | |
+ grad_fn = std::make_shared<QrBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->qr_out(res1_, res2_, self_); | |
+ increment_version(res1); | |
+ increment_version(res2); | |
+ rebase_history({ res1, res2 }, grad_fn); | |
+ if (jit::tracer::isTracing( res1, res2, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "qr_out", { res1, res2, self }, {res1, res2} ); | |
+ (void)n; | |
+ } | |
+ return std::forward_as_tuple(res1, res2); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::qr(const Tensor & self) const { | |
profiler::RecordFunction profiler("qr"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5023,6 +7775,29 @@ | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::geqrf_out(Tensor & res1, Tensor & res2, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("geqrf_out"); | |
+ auto& res1_ = unpack(res1, "res1", 0); | |
+ auto& res2_ = unpack(res2, "res2", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(res1); | |
+ check_inplace(res2); | |
+ std::shared_ptr<GeqrfBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("geqrf_out", { res1, res2 }); | |
+ grad_fn = std::make_shared<GeqrfBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->geqrf_out(res1_, res2_, self_); | |
+ increment_version(res1); | |
+ increment_version(res2); | |
+ rebase_history({ res1, res2 }, grad_fn); | |
+ if (jit::tracer::isTracing( res1, res2, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "geqrf_out", { res1, res2, self }, {res1, res2} ); | |
+ (void)n; | |
+ } | |
+ return std::forward_as_tuple(res1, res2); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::geqrf(const Tensor & self) const { | |
profiler::RecordFunction profiler("geqrf"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5041,6 +7815,27 @@ | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
+Tensor & VariableType::orgqr_out(Tensor & result, const Tensor & self, const Tensor & input2) const { | |
+ profiler::RecordFunction profiler("orgqr_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& input2_ = unpack(input2, "input2", 2); | |
+ check_inplace(result); | |
+ std::shared_ptr<OrgqrBackward> grad_fn; | |
+ if (compute_requires_grad({ self, input2 })) { | |
+ check_output_args("orgqr_out", { result }); | |
+ grad_fn = std::make_shared<OrgqrBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, input2 }); | |
+ } | |
+ baseType->orgqr_out(result_, self_, input2_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, input2 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "orgqr_out", { result, self, input2 }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::orgqr(const Tensor & self, const Tensor & input2) const { | |
profiler::RecordFunction profiler("orgqr"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5059,6 +7853,29 @@ | |
} | |
return result; | |
} | |
+Tensor & VariableType::ormqr_out(Tensor & result, const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) const { | |
+ profiler::RecordFunction profiler("ormqr_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& input2_ = unpack(input2, "input2", 2); | |
+ auto& input3_ = unpack(input3, "input3", 3); | |
+ check_inplace(result); | |
+ std::shared_ptr<OrmqrBackward> grad_fn; | |
+ if (compute_requires_grad({ self, input2, input3 })) { | |
+ check_output_args("ormqr_out", { result }); | |
+ grad_fn = std::make_shared<OrmqrBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, input2, input3 }); | |
+ } | |
+ baseType->ormqr_out(result_, self_, input2_, input3_, left, transpose); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, input2, input3 )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "ormqr_out", { result, self, input2, input3 }, {result} ); | |
+ setattr(n, jit::stringToSymbol("left"), left); | |
+ setattr(n, jit::stringToSymbol("transpose"), transpose); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) const { | |
profiler::RecordFunction profiler("ormqr"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5079,6 +7895,29 @@ | |
} | |
return result; | |
} | |
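+// Functions with several differentiable outputs (btrifact_out, | |
+// btrifact_with_info_out) hand the whole output list to rebase_history, | |
+// which numbers each defined output and sets grad_fn->num_inputs. | |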
+std::tuple<Tensor &,Tensor &> VariableType::btrifact_out(Tensor & result, Tensor & pivots, const Tensor & self, bool pivot) const { | |
+ profiler::RecordFunction profiler("btrifact_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& pivots_ = unpack(pivots, "pivots", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(result); | |
+ check_inplace(pivots); | |
+ std::shared_ptr<BtrifactBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("btrifact_out", { result, pivots }); | |
+ grad_fn = std::make_shared<BtrifactBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->btrifact_out(result_, pivots_, self_, pivot); | |
+ increment_version(result); | |
+ increment_version(pivots); | |
+ rebase_history({ result, pivots }, grad_fn); | |
+ if (jit::tracer::isTracing( result, pivots, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "btrifact_out", { result, pivots, self }, {result, pivots} ); | |
+ setattr(n, jit::stringToSymbol("pivot"), pivot); | |
+ } | |
+ return std::forward_as_tuple(result, pivots); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::btrifact(const Tensor & self, bool pivot) const { | |
profiler::RecordFunction profiler("btrifact"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5097,6 +7935,32 @@ | |
} | |
return std::make_tuple(std::move(result), std::move(pivots)); | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::btrifact_with_info_out(Tensor & result, Tensor & pivots, Tensor & info, const Tensor & self, bool pivot) const { | |
+ profiler::RecordFunction profiler("btrifact_with_info_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& pivots_ = unpack(pivots, "pivots", 1); | |
+ auto& info_ = unpack(info, "info", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ check_inplace(result); | |
+ check_inplace(pivots); | |
+ check_inplace(info); | |
+ std::shared_ptr<BtrifactWithInfoBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("btrifact_with_info_out", { result, pivots, info }); | |
+ grad_fn = std::make_shared<BtrifactWithInfoBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->btrifact_with_info_out(result_, pivots_, info_, self_, pivot); | |
+ increment_version(result); | |
+ increment_version(pivots); | |
+ increment_version(info); | |
+ rebase_history({ result, pivots, info }, grad_fn); | |
+ if (jit::tracer::isTracing( result, pivots, info, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "btrifact_with_info_out", { result, pivots, info, self }, {result, pivots, info} ); | |
+ setattr(n, jit::stringToSymbol("pivot"), pivot); | |
+ } | |
+ return std::forward_as_tuple(result, pivots, info); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::btrifact_with_info(const Tensor & self, bool pivot) const { | |
profiler::RecordFunction profiler("btrifact_with_info"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5115,6 +7978,30 @@ | |
} | |
return std::make_tuple(std::move(result), std::move(pivots), std::move(info)); | |
} | |
+Tensor & VariableType::btrisolve_out(Tensor & result, const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) const { | |
+ profiler::RecordFunction profiler("btrisolve_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& LU_data_ = unpack(LU_data, "LU_data", 2); | |
+ auto& LU_pivots_ = unpack(LU_pivots, "LU_pivots", 3); | |
+ check_inplace(result); | |
+ check_no_requires_grad(LU_data, "LU_data"); | |
+ check_no_requires_grad(LU_pivots, "LU_pivots"); | |
+ std::shared_ptr<BtrisolveBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("btrisolve_out", { result }); | |
+ grad_fn = std::make_shared<BtrisolveBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->btrisolve_out(result_, self_, LU_data_, LU_pivots_); | |
+ increment_version(result); | |
+ rebase_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self, LU_data, LU_pivots )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "btrisolve_out", { result, self, LU_data, LU_pivots }, {result} ); | |
+ (void)n; | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) const { | |
profiler::RecordFunction profiler("btrisolve"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5136,6 +8022,12 @@ | |
} | |
return result; | |
} | |
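+// Random samplers whose results are not differentiable (randperm_out, | |
+// multinomial_out, rand_out, randn_out) skip the autograd bookkeeping | |
+// entirely and just forward to the base type. | |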
+Tensor & VariableType::randperm_out(Tensor & result, int64_t n, Generator * generator) const { | |
+ profiler::RecordFunction profiler("randperm_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->randperm_out(result_, n, generator); | |
+ return result; | |
+} | |
Tensor VariableType::randperm(int64_t n, Generator * generator) const { | |
profiler::RecordFunction profiler("randperm"); | |
auto result = as_variable(baseType->randperm(n, generator)); | |
@@ -5186,6 +8075,13 @@ | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
+Tensor & VariableType::multinomial_out(Tensor & result, const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) const { | |
+ profiler::RecordFunction profiler("multinomial_out"); | |
+ auto& result_ = unpack_long(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ baseType->multinomial_out(result_, self_, num_samples, replacement, generator); | |
+ return result; | |
+} | |
Tensor VariableType::multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) const { | |
profiler::RecordFunction profiler("multinomial"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5207,6 +8102,23 @@ | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
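+// The three normal_out overloads differ only in which of mean/std are | |
+// tensors; each stores the sizes of its tensor inputs on the grad_fn for | |
+// the corresponding NormalBackward node. | |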
+Tensor & VariableType::normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) const { | |
+ profiler::RecordFunction profiler("normal_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& mean_ = unpack(mean, "mean", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<NormalBackward1> grad_fn; | |
+ if (compute_requires_grad({ mean })) { | |
+ check_output_args("normal_out", { output }); | |
+ grad_fn = std::make_shared<NormalBackward1>(); | |
+ grad_fn->next_functions = compute_next_functions({ mean }); | |
+ grad_fn->mean_sizes = mean.sizes(); | |
+ } | |
+ baseType->normal_out(output_, mean_, std, generator); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ return output; | |
+} | |
Tensor VariableType::normal(const Tensor & mean, double std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal"); | |
auto& mean_ = unpack(mean, "mean", 0); | |
@@ -5220,6 +8132,23 @@ | |
set_history(output, grad_fn); | |
return output; | |
} | |
+Tensor & VariableType::normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) const { | |
+ profiler::RecordFunction profiler("normal_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& std_ = unpack(std, "std", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<NormalBackward2> grad_fn; | |
+ if (compute_requires_grad({ std })) { | |
+ check_output_args("normal_out", { output }); | |
+ grad_fn = std::make_shared<NormalBackward2>(); | |
+ grad_fn->next_functions = compute_next_functions({ std }); | |
+ grad_fn->std_sizes = std.sizes(); | |
+ } | |
+ baseType->normal_out(output_, mean, std_, generator); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ return output; | |
+} | |
Tensor VariableType::normal(double mean, const Tensor & std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal"); | |
auto& std_ = unpack(std, "std", 1); | |
@@ -5233,6 +8162,25 @@ | |
set_history(output, grad_fn); | |
return output; | |
} | |
+Tensor & VariableType::normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) const { | |
+ profiler::RecordFunction profiler("normal_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& mean_ = unpack(mean, "mean", 1); | |
+ auto& std_ = unpack(std, "std", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<NormalBackward3> grad_fn; | |
+ if (compute_requires_grad({ mean, std })) { | |
+ check_output_args("normal_out", { output }); | |
+ grad_fn = std::make_shared<NormalBackward3>(); | |
+ grad_fn->next_functions = compute_next_functions({ mean, std }); | |
+ grad_fn->mean_sizes = mean.sizes(); | |
+ grad_fn->std_sizes = std.sizes(); | |
+ } | |
+ baseType->normal_out(output_, mean_, std_, generator); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ return output; | |
+} | |
Tensor VariableType::normal(const Tensor & mean, const Tensor & std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal"); | |
auto& mean_ = unpack(mean, "mean", 0); | |
@@ -5308,11 +8252,23 @@ | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
+Tensor & VariableType::rand_out(Tensor & result, IntList size, Generator * generator) const { | |
+ profiler::RecordFunction profiler("rand_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->rand_out(result_, size, generator); | |
+ return result; | |
+} | |
Tensor VariableType::rand(IntList size, Generator * generator) const { | |
profiler::RecordFunction profiler("rand"); | |
auto result = as_variable(baseType->rand(size, generator)); | |
return result; | |
} | |
+Tensor & VariableType::randn_out(Tensor & result, IntList size, Generator * generator) const { | |
+ profiler::RecordFunction profiler("randn_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ baseType->randn_out(result_, size, generator); | |
+ return result; | |
+} | |
Tensor VariableType::randn(IntList size, Generator * generator) const { | |
profiler::RecordFunction profiler("randn"); | |
auto result = as_variable(baseType->randn(size, generator)); | |
@@ -5333,6 +8288,22 @@ | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
+Tensor & VariableType::bernoulli_out(Tensor & output, const Tensor & self, Generator * generator) const { | |
+ profiler::RecordFunction profiler("bernoulli_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<BernoulliBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("bernoulli_out", { output }); | |
+ grad_fn = std::make_shared<BernoulliBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->bernoulli_out(output_, self_, generator); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ return output; | |
+} | |
Tensor VariableType::bernoulli(const Tensor & self, Generator * generator) const { | |
profiler::RecordFunction profiler("bernoulli"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5346,6 +8316,26 @@ | |
set_history(output, grad_fn); | |
return output; | |
} | |
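+// _standard_gamma_out saves its input up front and the sampled output only | |
+// after the base call, once the buffer actually holds the result. | |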
+Tensor & VariableType::_standard_gamma_out(Tensor & output, const Tensor & self, Generator * generator) const { | |
+ profiler::RecordFunction profiler("_standard_gamma_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<StandardGammaBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("_standard_gamma_out", { output }); | |
+ grad_fn = std::make_shared<StandardGammaBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->_standard_gamma_out(output_, self_, generator); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (grad_fn) { | |
+ grad_fn->output_ = SavedVariable(output, true); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
profiler::RecordFunction profiler("_standard_gamma"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5437,6 +8424,32 @@ | |
} | |
return self; | |
} | |
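+// as_strided_out captures the input's TensorGeometry together with the | |
+// requested size/stride/storage_offset for AsStridedBackward; note that it | |
+// uses set_history on the result rather than rebase_history. | |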
+Tensor & VariableType::as_strided_out(Tensor & result, const Tensor & self, IntList size, IntList stride, int64_t storage_offset) const { | |
+ profiler::RecordFunction profiler("as_strided_out"); | |
+ auto& result_ = unpack(result, "result", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(result); | |
+ std::shared_ptr<AsStridedBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("as_strided_out", { result }); | |
+ grad_fn = std::make_shared<AsStridedBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_geometry = TensorGeometry(self); | |
+ grad_fn->size = size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->storage_offset = storage_offset; | |
+ } | |
+ baseType->as_strided_out(result_, self_, size, stride, storage_offset); | |
+ increment_version(result); | |
+ set_history(result, grad_fn); | |
+ if (jit::tracer::isTracing( result, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "as_strided_out", { result, self }, {result} ); | |
+ setattr(n, jit::stringToSymbol("size"), size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("storage_offset"), storage_offset); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::as_strided(const Tensor & self, IntList size, IntList stride, int64_t storage_offset) const { | |
profiler::RecordFunction profiler("as_strided"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5484,6 +8497,28 @@ | |
} | |
return self; | |
} | |
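+// cat_out writes into `self`; it records the per-input sizes along `dim` | |
+// (to_arg_sizes) for CatBackward, and the trace flattens the output | |
+// together with the input list. | |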
+Tensor & VariableType::cat_out(Tensor & self, TensorList tensors, int64_t dim) const { | |
+ profiler::RecordFunction profiler("cat_out"); | |
+ auto& self_ = unpack(self, "self", 0); | |
+ auto tensors_ = unpack(tensors, "tensors", 1); | |
+ check_inplace(self); | |
+ std::shared_ptr<CatBackward> grad_fn; | |
+ if (compute_requires_grad({ tensors })) { | |
+ check_output_args("cat_out", { self }); | |
+ grad_fn = std::make_shared<CatBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ tensors }); | |
+ grad_fn->tensors_sizes_dim = to_arg_sizes(tensors, dim); | |
+ grad_fn->dim = dim; | |
+ } | |
+ baseType->cat_out(self_, tensors_, dim); | |
+ increment_version(self); | |
+ rebase_history(self, grad_fn); | |
+ if (jit::tracer::isTracing( self, tensors )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "cat_out", flatten( self, tensors ), {self} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return self; | |
+} | |
Tensor VariableType::cat(TensorList tensors, int64_t dim) const { | |
profiler::RecordFunction profiler("cat"); | |
auto tensors_ = unpack(tensors, "tensors", 0); | |
@@ -5527,10 +8561,9 @@ | |
auto& self_ = unpack(self, "self", 0); | |
auto mask_ = unpack(mask, "mask", 1); | |
std::shared_ptr<SparseMaskBackward> grad_fn; | |
- if (compute_requires_grad({ self })) { | |
+ if (compute_requires_grad({ self, mask.tref })) { | |
grad_fn = std::make_shared<SparseMaskBackward>(); | |
- grad_fn->next_functions = compute_next_functions({ self }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ self, mask.tref }); | |
} | |
auto result = as_variable(baseType->_sparse_mask(self_, mask_)); | |
set_history(result, grad_fn); | |
@@ -5574,6 +8605,16 @@ | |
} | |
return result; | |
} | |
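+// The user-facing NN out= entry points (binary_cross_entropy_out, | |
+// kl_div_out, l1_loss_out, ...) only dispatch through Type::*_out and record | |
+// a trace here; the autograd bookkeeping lives in the matching *_forward_out | |
+// wrappers below. | |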
+Tensor & VariableType::binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("binary_cross_entropy_out"); | |
+ Type::binary_cross_entropy_out(output, self, target, weight, size_average, reduce); | |
+ if (jit::tracer::isTracing( output, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_out", { output, self, target, weight }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("binary_cross_entropy"); | |
auto output = Type::binary_cross_entropy(self, target, weight, size_average, reduce); | |
@@ -5584,6 +8625,36 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::binary_cross_entropy_forward_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("binary_cross_entropy_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack(target, "target", 2); | |
+ auto weight_ = unpack_opt(weight, "weight", 3); | |
+ check_inplace(output); | |
+ check_no_requires_grad(target, "target"); | |
+ check_no_requires_grad(weight, "weight"); | |
+ std::shared_ptr<BinaryCrossEntropyBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("binary_cross_entropy_forward_out", { output }); | |
+ grad_fn = std::make_shared<BinaryCrossEntropyBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->binary_cross_entropy_forward_out(output_, self_, target_, weight_, size_average, reduce); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_forward_out", { output, self, target, weight }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::binary_cross_entropy_forward(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("binary_cross_entropy_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5610,6 +8681,30 @@ | |
} | |
return output; | |
} | |
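+// No double backward is implemented for binary_cross_entropy, so its | |
+// backward_out wrapper installs an Error node that throws if a second | |
+// derivative is requested through it. | |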
+Tensor & VariableType::binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("binary_cross_entropy_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack(target, "target", 3); | |
+ auto weight_ = unpack_opt(weight, "weight", 4); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, target, weight })) { | |
+ check_output_args("binary_cross_entropy_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<Error>("the derivative for binary_cross_entropy_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, target, weight }); | |
+ } | |
+ baseType->binary_cross_entropy_backward_out(grad_input_, grad_output_, self_, target_, weight_, size_average, reduce); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_backward_out", { grad_input, grad_output, self, target, weight }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("binary_cross_entropy_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -5631,6 +8725,16 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::kl_div_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("kl_div_out"); | |
+ Type::kl_div_out(output, self, target, size_average, reduce); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "kl_div_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::kl_div(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("kl_div"); | |
auto output = Type::kl_div(self, target, size_average, reduce); | |
@@ -5641,6 +8745,33 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::kl_div_forward_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("kl_div_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack(target, "target", 2); | |
+ check_inplace(output); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<KlDivBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("kl_div_forward_out", { output }); | |
+ grad_fn = std::make_shared<KlDivBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->kl_div_forward_out(output_, self_, target_, size_average, reduce); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "kl_div_forward_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::kl_div_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("kl_div_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5664,6 +8795,34 @@ | |
} | |
return output; | |
} | |
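+// kl_div, l1_loss and mse_loss do have double-backward nodes, so their | |
+// backward_out wrappers build real *BackwardBackward functions and save | |
+// self, target and the reduction flags. | |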
+Tensor & VariableType::kl_div_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("kl_div_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack(target, "target", 3); | |
+ check_inplace(grad_input); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<KlDivBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("kl_div_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<KlDivBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->kl_div_backward_out(grad_input_, grad_output_, self_, target_, size_average, reduce); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "kl_div_backward_out", { grad_input, grad_output, self, target }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::kl_div_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("kl_div_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -5688,6 +8847,16 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("l1_loss_out"); | |
+ Type::l1_loss_out(output, self, target, size_average, reduce); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "l1_loss_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::l1_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("l1_loss"); | |
auto output = Type::l1_loss(self, target, size_average, reduce); | |
@@ -5698,6 +8867,33 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::l1_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("l1_loss_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack(target, "target", 2); | |
+ check_inplace(output); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<L1LossBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("l1_loss_forward_out", { output }); | |
+ grad_fn = std::make_shared<L1LossBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->l1_loss_forward_out(output_, self_, target_, size_average, reduce); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "l1_loss_forward_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::l1_loss_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("l1_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5721,6 +8917,34 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("l1_loss_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack(target, "target", 3); | |
+ check_inplace(grad_input); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<L1LossBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("l1_loss_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<L1LossBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->l1_loss_backward_out(grad_input_, grad_output_, self_, target_, size_average, reduce); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "l1_loss_backward_out", { grad_input, grad_output, self, target }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("l1_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -5745,6 +8969,16 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("mse_loss_out"); | |
+ Type::mse_loss_out(output, self, target, size_average, reduce); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "mse_loss_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::mse_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("mse_loss"); | |
auto output = Type::mse_loss(self, target, size_average, reduce); | |
@@ -5755,6 +8989,33 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::mse_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("mse_loss_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack(target, "target", 2); | |
+ check_inplace(output); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<MseLossBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("mse_loss_forward_out", { output }); | |
+ grad_fn = std::make_shared<MseLossBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->mse_loss_forward_out(output_, self_, target_, size_average, reduce); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "mse_loss_forward_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::mse_loss_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("mse_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5778,6 +9039,35 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("mse_loss_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack(target, "target", 3); | |
+ check_inplace(grad_input); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<MseLossBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("mse_loss_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<MseLossBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->mse_loss_backward_out(grad_input_, grad_output_, self_, target_, size_average, reduce); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "mse_loss_backward_out", { grad_input, grad_output, self, target }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("mse_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -5803,6 +9093,17 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
+ profiler::RecordFunction profiler("multi_margin_loss_out"); | |
+ Type::multi_margin_loss_out(output, self, target, p, margin, weight, size_average); | |
+ if (jit::tracer::isTracing( output, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_out", { output, self, target, weight }, {output} ); | |
+ setattr(n, jit::stringToSymbol("p"), p); | |
+ setattr(n, jit::stringToSymbol("margin"), margin); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
profiler::RecordFunction profiler("multi_margin_loss"); | |
auto output = Type::multi_margin_loss(self, target, p, margin, weight, size_average); | |
@@ -5814,6 +9115,37 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::multi_margin_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
+ profiler::RecordFunction profiler("multi_margin_loss_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack_long(target, "target", 2); | |
+ auto weight_ = unpack_opt(weight, "weight", 5); | |
+ check_inplace(output); | |
+ check_no_requires_grad(weight, "weight"); | |
+ std::shared_ptr<MultiMarginLossBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("multi_margin_loss_forward_out", { output }); | |
+ grad_fn = std::make_shared<MultiMarginLossBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->p = p; | |
+ grad_fn->margin = margin; | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->size_average = size_average; | |
+ } | |
+ baseType->multi_margin_loss_forward_out(output_, self_, target_, p, margin, weight_, size_average); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_forward_out", { output, self, target, weight }, {output} ); | |
+ setattr(n, jit::stringToSymbol("p"), p); | |
+ setattr(n, jit::stringToSymbol("margin"), margin); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::multi_margin_loss_forward(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
profiler::RecordFunction profiler("multi_margin_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5841,16 +9173,39 @@ | |
} | |
return output; | |
} | |
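+// target is an index tensor and never requires grad, so the backward wrapper | |
+// only considers self and weight; the context change just below applies the | |
+// same fix to the existing multi_margin_loss_backward. | |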
+Tensor & VariableType::multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
+ profiler::RecordFunction profiler("multi_margin_loss_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack_long(target, "target", 2); | |
+ auto weight_ = unpack_opt(weight, "weight", 5); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self, weight })) { | |
+ check_output_args("multi_margin_loss_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<Error>("the derivative for multi_margin_loss_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight }); | |
+ } | |
+ baseType->multi_margin_loss_backward_out(grad_input_, self_, target_, p, margin, weight_, size_average); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_backward_out", { grad_input, self, target, weight }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("p"), p); | |
+ setattr(n, jit::stringToSymbol("margin"), margin); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::multi_margin_loss_backward(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
profiler::RecordFunction profiler("multi_margin_loss_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 4); | |
std::shared_ptr<Error> grad_fn; | |
- if (compute_requires_grad({ self, target, weight })) { | |
+ if (compute_requires_grad({ self, weight })) { | |
grad_fn = std::make_shared<Error>("the derivative for multi_margin_loss_backward is not implemented"); | |
- grad_fn->next_functions = compute_next_functions({ self, target, weight }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ self, weight }); | |
} | |
auto grad_input = as_variable(baseType->multi_margin_loss_backward(self_, target_, p, margin, weight_, size_average)); | |
set_history(grad_input, grad_fn); | |
@@ -5862,6 +9217,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average) const { | |
+ profiler::RecordFunction profiler("multilabel_margin_loss_out"); | |
+ Type::multilabel_margin_loss_out(output, self, target, size_average); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::multilabel_margin_loss(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss"); | |
auto output = Type::multilabel_margin_loss(self, target, size_average); | |
@@ -5871,6 +9235,34 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, bool size_average) const { | |
+ profiler::RecordFunction profiler("multilabel_margin_loss_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& is_target_ = unpack(is_target, "is_target", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack_long(target, "target", 3); | |
+ check_inplace(output); | |
+ std::shared_ptr<MultilabelMarginLossBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("multilabel_margin_loss_forward_out", { output }); | |
+ grad_fn = std::make_shared<MultilabelMarginLossBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ } | |
+ baseType->multilabel_margin_loss_forward_out(output_, is_target_, self_, target_, size_average); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, is_target, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_forward_out", { output, is_target, self, target }, {output, is_target} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->is_target_ = SavedVariable(is_target, true); | |
+ } | |
+ return std::forward_as_tuple(output, is_target); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5895,16 +9287,37 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(is_target)); | |
} | |
+Tensor & VariableType::multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & self, const Tensor & target, bool size_average, const Tensor & is_target) const { | |
+ profiler::RecordFunction profiler("multilabel_margin_loss_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack_long(target, "target", 2); | |
+ auto& is_target_ = unpack(is_target, "is_target", 4); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self, is_target })) { | |
+ check_output_args("multilabel_margin_loss_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<Error>("the derivative for multilabel_margin_loss_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self, is_target }); | |
+ } | |
+ baseType->multilabel_margin_loss_backward_out(grad_input_, self_, target_, size_average, is_target_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, self, target, is_target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_backward_out", { grad_input, self, target, is_target }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::multilabel_margin_loss_backward(const Tensor & self, const Tensor & target, bool size_average, const Tensor & is_target) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto& is_target_ = unpack(is_target, "is_target", 3); | |
std::shared_ptr<Error> grad_fn; | |
- if (compute_requires_grad({ self, target, is_target })) { | |
+ if (compute_requires_grad({ self, is_target })) { | |
grad_fn = std::make_shared<Error>("the derivative for multilabel_margin_loss_backward is not implemented"); | |
- grad_fn->next_functions = compute_next_functions({ self, target, is_target }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ self, is_target }); | |
} | |
auto grad_input = as_variable(baseType->multilabel_margin_loss_backward(self_, target_, size_average, is_target_)); | |
set_history(grad_input, grad_fn); | |
@@ -5914,6 +9327,17 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
+ profiler::RecordFunction profiler("nll_loss_out"); | |
+ Type::nll_loss_out(output, self, target, weight, size_average, ignore_index, reduce); | |
+ if (jit::tracer::isTracing( output, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss_out", { output, self, target, weight }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss"); | |
auto output = Type::nll_loss(self, target, weight, size_average, ignore_index, reduce); | |
@@ -5925,6 +9349,41 @@ | |
} | |
return output; | |
} | |
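+// nll_loss_forward_out also fills a total_weight buffer: both tensors are | |
+// traced as outputs, but only `output` is rebased onto the graph, and | |
+// total_weight is saved on the grad_fn after the base call. | |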
+std::tuple<Tensor &,Tensor &> VariableType::nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
+ profiler::RecordFunction profiler("nll_loss_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& total_weight_ = unpack(total_weight, "total_weight", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack_long(target, "target", 3); | |
+ auto weight_ = unpack_opt(weight, "weight", 4); | |
+ check_inplace(output); | |
+ check_no_requires_grad(weight, "weight"); | |
+ std::shared_ptr<NllLossBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("nll_loss_forward_out", { output }); | |
+ grad_fn = std::make_shared<NllLossBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->ignore_index = ignore_index; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->nll_loss_forward_out(output_, total_weight_, self_, target_, weight_, size_average, ignore_index, reduce); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, total_weight, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss_forward_out", { output, total_weight, self, target, weight }, {output, total_weight} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->total_weight_ = SavedVariable(total_weight, true); | |
+ } | |
+ return std::forward_as_tuple(output, total_weight); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -5956,6 +9415,39 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(total_weight)); | |
} | |
+Tensor & VariableType::nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const { | |
+ profiler::RecordFunction profiler("nll_loss_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack_long(target, "target", 3); | |
+ auto weight_ = unpack_opt(weight, "weight", 4); | |
+ auto& total_weight_ = unpack(total_weight, "total_weight", 8); | |
+ check_inplace(grad_input); | |
+ check_no_requires_grad(weight, "weight"); | |
+ check_no_requires_grad(total_weight, "total_weight"); | |
+ std::shared_ptr<NllLossBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("nll_loss_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<NllLossBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->ignore_index = ignore_index; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->nll_loss_backward_out(grad_input_, grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, target, weight, total_weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss_backward_out", { grad_input, grad_output, self, target, weight, total_weight }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const { | |
profiler::RecordFunction profiler("nll_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -5985,6 +9477,17 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
+ profiler::RecordFunction profiler("nll_loss2d_out"); | |
+ Type::nll_loss2d_out(output, self, target, weight, size_average, ignore_index, reduce); | |
+ if (jit::tracer::isTracing( output, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_out", { output, self, target, weight }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss2d"); | |
auto output = Type::nll_loss2d(self, target, weight, size_average, ignore_index, reduce); | |
@@ -5996,6 +9499,41 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
+ profiler::RecordFunction profiler("nll_loss2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& total_weight_ = unpack(total_weight, "total_weight", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack_long(target, "target", 3); | |
+ auto weight_ = unpack_opt(weight, "weight", 4); | |
+ check_inplace(output); | |
+ check_no_requires_grad(weight, "weight"); | |
+ std::shared_ptr<NllLoss2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("nll_loss2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<NllLoss2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->ignore_index = ignore_index; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->nll_loss2d_forward_out(output_, total_weight_, self_, target_, weight_, size_average, ignore_index, reduce); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, total_weight, self, target, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_forward_out", { output, total_weight, self, target, weight }, {output, total_weight} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->total_weight_ = SavedVariable(total_weight, true); | |
+ } | |
+ return std::forward_as_tuple(output, total_weight); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6027,6 +9565,39 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(total_weight)); | |
} | |
+Tensor & VariableType::nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const { | |
+ profiler::RecordFunction profiler("nll_loss2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack_long(target, "target", 3); | |
+ auto weight_ = unpack_opt(weight, "weight", 4); | |
+ auto& total_weight_ = unpack(total_weight, "total_weight", 8); | |
+ check_inplace(grad_input); | |
+ check_no_requires_grad(weight, "weight"); | |
+ check_no_requires_grad(total_weight, "total_weight"); | |
+ std::shared_ptr<NllLoss2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("nll_loss2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<NllLoss2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->ignore_index = ignore_index; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->nll_loss2d_backward_out(grad_input_, grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, target, weight, total_weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_backward_out", { grad_input, grad_output, self, target, weight, total_weight }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return grad_input; | |
+} | |
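// The *_backward_out wrappers follow the same recipe, but the node they attach
// is the double-backward function (NllLoss2DBackwardBackward here), so a later
// backward pass through grad_input computes a gradient of the gradient. The
// check_no_requires_grad(weight, ...) and check_no_requires_grad(total_weight, ...)
// calls reject inputs for which no derivative formula exists; a hypothetical
// caller such as
//
//   Variable w = make_variable(weight_data, /*requires_grad=*/true);  // make_variable and weight_data assumed
//   nll_loss2d_backward_out(grad_input, grad_output, self, target, w,
//                           size_average, ignore_index, reduce, total_weight);
//
// fails fast instead of silently dropping the gradient for w.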
Tensor VariableType::nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const { | |
profiler::RecordFunction profiler("nll_loss2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6056,6 +9627,16 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("smooth_l1_loss_out"); | |
+ Type::smooth_l1_loss_out(output, self, target, size_average, reduce); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
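// The plain *_out entry points (smooth_l1_loss_out here, and soft_margin_loss_out,
// elu_out, glu_out, ... below) do no autograd bookkeeping of their own: they record
// the trace and defer to the generic Type implementation, which presumably dispatches
// back into the matching *_forward_out wrapper on this VariableType through the
// virtual call, so the grad_fn wiring happens exactly once. Roughly:
//
//   VariableType::smooth_l1_loss_out(output, self, target, ...)
//     -> Type::smooth_l1_loss_out(...)             // generic NN dispatch
//        -> this->smooth_l1_loss_forward_out(...)  // virtual, lands back in the
//                                                  // VariableType wrapper below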
Tensor VariableType::smooth_l1_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("smooth_l1_loss"); | |
auto output = Type::smooth_l1_loss(self, target, size_average, reduce); | |
@@ -6066,6 +9647,33 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::smooth_l1_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("smooth_l1_loss_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack(target, "target", 2); | |
+ check_inplace(output); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<SmoothL1LossBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("smooth_l1_loss_forward_out", { output }); | |
+ grad_fn = std::make_shared<SmoothL1LossBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->smooth_l1_loss_forward_out(output_, self_, target_, size_average, reduce); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_forward_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::smooth_l1_loss_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("smooth_l1_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6089,6 +9697,35 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
+ profiler::RecordFunction profiler("smooth_l1_loss_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& target_ = unpack(target, "target", 3); | |
+ check_inplace(grad_input); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<SmoothL1LossBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("smooth_l1_loss_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<SmoothL1LossBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ grad_fn->reduce = reduce; | |
+ } | |
+ baseType->smooth_l1_loss_backward_out(grad_input_, grad_output_, self_, target_, size_average, reduce); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_backward_out", { grad_input, grad_output, self, target }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ setattr(n, jit::stringToSymbol("reduce"), reduce); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("smooth_l1_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6114,6 +9751,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average) const { | |
+ profiler::RecordFunction profiler("soft_margin_loss_out"); | |
+ Type::soft_margin_loss_out(output, self, target, size_average); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::soft_margin_loss(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("soft_margin_loss"); | |
auto output = Type::soft_margin_loss(self, target, size_average); | |
@@ -6123,6 +9769,31 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::soft_margin_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, bool size_average) const { | |
+ profiler::RecordFunction profiler("soft_margin_loss_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack(target, "target", 2); | |
+ check_inplace(output); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<SoftMarginLossBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("soft_margin_loss_forward_out", { output }); | |
+ grad_fn = std::make_shared<SoftMarginLossBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ } | |
+ baseType->soft_margin_loss_forward_out(output_, self_, target_, size_average); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_forward_out", { output, self, target }, {output} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::soft_margin_loss_forward(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("soft_margin_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6144,6 +9815,31 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & self, const Tensor & target, bool size_average) const { | |
+ profiler::RecordFunction profiler("soft_margin_loss_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& target_ = unpack(target, "target", 2); | |
+ check_inplace(grad_input); | |
+ check_no_requires_grad(target, "target"); | |
+ std::shared_ptr<SoftMarginLossBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("soft_margin_loss_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<SoftMarginLossBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->target_ = SavedVariable(target, false); | |
+ grad_fn->size_average = size_average; | |
+ } | |
+ baseType->soft_margin_loss_backward_out(grad_input_, self_, target_, size_average); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, self, target )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_backward_out", { grad_input, self, target }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("size_average"), size_average); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::soft_margin_loss_backward(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("soft_margin_loss_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6165,6 +9861,16 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale) const { | |
+ profiler::RecordFunction profiler("elu_out"); | |
+ Type::elu_out(output, self, alpha, scale); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "elu_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ setattr(n, jit::stringToSymbol("scale"), scale); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::elu(const Tensor & self, Scalar alpha, Scalar scale) const { | |
profiler::RecordFunction profiler("elu"); | |
auto output = Type::elu(self, alpha, scale); | |
@@ -6175,6 +9881,32 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::elu_forward_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale) const { | |
+ profiler::RecordFunction profiler("elu_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<EluBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("elu_forward_out", { output }); | |
+ grad_fn = std::make_shared<EluBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->alpha = alpha; | |
+ grad_fn->scale = scale; | |
+ } | |
+ baseType->elu_forward_out(output_, self_, alpha, scale); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "elu_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ setattr(n, jit::stringToSymbol("scale"), scale); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->output_ = SavedVariable(output, true); | |
+ } | |
+ return output; | |
+} | |
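// EluBackward is expressed in terms of the op's result (note that self_ is not
// saved for it), so grad_fn->output_ can only be filled in after the kernel has
// produced the result and rebase_history has attached grad_fn to `output`; the
// second SavedVariable argument (true) appears to mark the saved tensor as an
// output of this very grad_fn. The ordering that matters is just:
//
//   baseType->elu_forward_out(output_, self_, alpha, scale);  // kernel fills `output`
//   increment_version(output);
//   rebase_history(output, grad_fn);                          // `output` now points at grad_fn
//   if (grad_fn) {
//     grad_fn->output_ = SavedVariable(output, true);
//   }
//
// The same late-save pattern shows up below for log_softmax_forward_out,
// softmax_forward_out and softplus_forward_out.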
Tensor VariableType::elu_forward(const Tensor & self, Scalar alpha, Scalar scale) const { | |
profiler::RecordFunction profiler("elu_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6197,6 +9929,32 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, const Tensor & output) const { | |
+ profiler::RecordFunction profiler("elu_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& output_ = unpack(output, "output", 4); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<EluBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, output })) { | |
+ check_output_args("elu_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<EluBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, output }); | |
+ grad_fn->alpha = alpha; | |
+ grad_fn->scale = scale; | |
+ grad_fn->output_ = SavedVariable(output, false); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ } | |
+ baseType->elu_backward_out(grad_input_, grad_output_, alpha, scale, output_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, output )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "elu_backward_out", { grad_input, grad_output, output }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("alpha"), alpha); | |
+ setattr(n, jit::stringToSymbol("scale"), scale); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, const Tensor & output) const { | |
profiler::RecordFunction profiler("elu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6253,6 +10011,15 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::glu_out(Tensor & output, const Tensor & self, int64_t dim) const { | |
+ profiler::RecordFunction profiler("glu_out"); | |
+ Type::glu_out(output, self, dim); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "glu_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::glu(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("glu"); | |
auto output = Type::glu(self, dim); | |
@@ -6262,6 +10029,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::glu_forward_out(Tensor & output, const Tensor & self, int64_t dim) const { | |
+ profiler::RecordFunction profiler("glu_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<GluBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("glu_forward_out", { output }); | |
+ grad_fn = std::make_shared<GluBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ } | |
+ baseType->glu_forward_out(output_, self_, dim); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "glu_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::glu_forward(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("glu_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6280,6 +10069,30 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) const { | |
+ profiler::RecordFunction profiler("glu_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<GluBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("glu_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<GluBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ } | |
+ baseType->glu_backward_out(grad_input_, grad_output_, self_, dim); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "glu_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("glu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6300,6 +10113,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::hardshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const { | |
+ profiler::RecordFunction profiler("hardshrink_out"); | |
+ Type::hardshrink_out(output, self, lambd); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "hardshrink_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("lambd"), lambd); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::hardshrink(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("hardshrink"); | |
auto output = Type::hardshrink(self, lambd); | |
@@ -6309,6 +10131,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::hardshrink_forward_out(Tensor & output, const Tensor & self, Scalar lambd) const { | |
+ profiler::RecordFunction profiler("hardshrink_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<HardshrinkBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("hardshrink_forward_out", { output }); | |
+ grad_fn = std::make_shared<HardshrinkBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->lambd = lambd; | |
+ } | |
+ baseType->hardshrink_forward_out(output_, self_, lambd); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "hardshrink_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("lambd"), lambd); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::hardshrink_forward(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("hardshrink_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6327,6 +10171,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::hardshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) const { | |
+ profiler::RecordFunction profiler("hardshrink_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<HardshrinkBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("hardshrink_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<HardshrinkBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->lambd = lambd; | |
+ } | |
+ baseType->hardshrink_backward_out(grad_input_, grad_output_, self_, lambd); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "hardshrink_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("lambd"), lambd); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::hardshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("hardshrink_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6346,6 +10213,16 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
+ profiler::RecordFunction profiler("hardtanh_out"); | |
+ Type::hardtanh_out(output, self, min_val, max_val); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "hardtanh_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("min_val"), min_val); | |
+ setattr(n, jit::stringToSymbol("max_val"), max_val); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh"); | |
auto output = Type::hardtanh(self, min_val, max_val); | |
@@ -6356,6 +10233,30 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::hardtanh_forward_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
+ profiler::RecordFunction profiler("hardtanh_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<HardtanhBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("hardtanh_forward_out", { output }); | |
+ grad_fn = std::make_shared<HardtanhBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->min_val = min_val; | |
+ grad_fn->max_val = max_val; | |
+ } | |
+ baseType->hardtanh_forward_out(output_, self_, min_val, max_val); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "hardtanh_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("min_val"), min_val); | |
+ setattr(n, jit::stringToSymbol("max_val"), max_val); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::hardtanh_forward(const Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6376,6 +10277,31 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
+ profiler::RecordFunction profiler("hardtanh_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<HardtanhBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("hardtanh_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<HardtanhBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->min_val = min_val; | |
+ grad_fn->max_val = max_val; | |
+ } | |
+ baseType->hardtanh_backward_out(grad_input_, grad_output_, self_, min_val, max_val); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "hardtanh_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("min_val"), min_val); | |
+ setattr(n, jit::stringToSymbol("max_val"), max_val); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6431,6 +10357,15 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) const { | |
+ profiler::RecordFunction profiler("leaky_relu_out"); | |
+ Type::leaky_relu_out(output, self, negative_slope); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "leaky_relu_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::leaky_relu(const Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu"); | |
auto output = Type::leaky_relu(self, negative_slope); | |
@@ -6440,6 +10375,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::leaky_relu_forward_out(Tensor & output, const Tensor & self, Scalar negative_slope) const { | |
+ profiler::RecordFunction profiler("leaky_relu_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<LeakyReluBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("leaky_relu_forward_out", { output }); | |
+ grad_fn = std::make_shared<LeakyReluBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->negative_slope = negative_slope; | |
+ } | |
+ baseType->leaky_relu_forward_out(output_, self_, negative_slope); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "leaky_relu_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::leaky_relu_forward(const Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6458,6 +10415,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const { | |
+ profiler::RecordFunction profiler("leaky_relu_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<LeakyReluBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("leaky_relu_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<LeakyReluBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->negative_slope = negative_slope; | |
+ } | |
+ baseType->leaky_relu_backward_out(grad_input_, grad_output_, self_, negative_slope); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "leaky_relu_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6508,6 +10488,15 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::log_sigmoid_out(Tensor & output, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("log_sigmoid_out"); | |
+ Type::log_sigmoid_out(output, self); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_out", { output, self }, {output} ); | |
+ (void)n; | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::log_sigmoid(const Tensor & self) const { | |
profiler::RecordFunction profiler("log_sigmoid"); | |
auto output = Type::log_sigmoid(self); | |
@@ -6517,6 +10506,31 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("log_sigmoid_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& buffer_ = unpack(buffer, "buffer", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<LogSigmoidBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("log_sigmoid_forward_out", { output }); | |
+ grad_fn = std::make_shared<LogSigmoidBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->log_sigmoid_forward_out(output_, buffer_, self_); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, buffer, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_forward_out", { output, buffer, self }, {output, buffer} ); | |
+ (void)n; | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->buffer_ = SavedVariable(buffer, true); | |
+ } | |
+ return std::forward_as_tuple(output, buffer); | |
+} | |
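// log_sigmoid_forward_out returns two tensors, but only `output` is
// differentiable: it alone gets increment_version / rebase_history, while
// `buffer` is a workspace that log_sigmoid_backward (below) consumes, so it is
// saved into grad_fn only after the kernel has filled it, and both tensors are
// recorded as trace outputs. The nll_loss2d_forward_out wrapper near the top of
// this hunk handles its total_weight output the same way.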
std::tuple<Tensor,Tensor> VariableType::log_sigmoid_forward(const Tensor & self) const { | |
profiler::RecordFunction profiler("log_sigmoid_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6538,6 +10552,32 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(buffer)); | |
} | |
+Tensor & VariableType::log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const { | |
+ profiler::RecordFunction profiler("log_sigmoid_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& buffer_ = unpack(buffer, "buffer", 3); | |
+ check_inplace(grad_input); | |
+ check_no_requires_grad(buffer, "buffer"); | |
+ std::shared_ptr<LogSigmoidBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("log_sigmoid_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<LogSigmoidBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->buffer_ = SavedVariable(buffer, false); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ } | |
+ baseType->log_sigmoid_backward_out(grad_input_, grad_output_, self_, buffer_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, buffer )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_backward_out", { grad_input, grad_output, self, buffer }, {grad_input} ); | |
+ (void)n; | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const { | |
profiler::RecordFunction profiler("log_sigmoid_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6560,6 +10600,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::log_softmax_out(Tensor & output, const Tensor & self, int64_t dim) const { | |
+ profiler::RecordFunction profiler("log_softmax_out"); | |
+ Type::log_softmax_out(output, self, dim); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "log_softmax_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::log_softmax(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("log_softmax"); | |
auto output = Type::log_softmax(self, dim); | |
@@ -6569,6 +10618,31 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::log_softmax_forward_out(Tensor & output, const Tensor & self, int64_t dim) const { | |
+ profiler::RecordFunction profiler("log_softmax_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<LogSoftmaxBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("log_softmax_forward_out", { output }); | |
+ grad_fn = std::make_shared<LogSoftmaxBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ } | |
+ baseType->log_softmax_forward_out(output_, self_, dim); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "log_softmax_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->output_ = SavedVariable(output, true); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::log_softmax_forward(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("log_softmax_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6590,6 +10664,31 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::log_softmax_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const { | |
+ profiler::RecordFunction profiler("log_softmax_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& output_ = unpack(output, "output", 4); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<LogSoftmaxBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("log_softmax_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<LogSoftmaxBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->dim = dim; | |
+ grad_fn->output_ = SavedVariable(output, false); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ } | |
+ baseType->log_softmax_backward_out(grad_input_, grad_output_, self_, dim, output_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, output )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "log_softmax_backward_out", { grad_input, grad_output, self, output }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::log_softmax_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const { | |
profiler::RecordFunction profiler("log_softmax_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6611,6 +10710,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::prelu_out(Tensor & output, const Tensor & self, const Tensor & weight) const { | |
+ profiler::RecordFunction profiler("prelu_out"); | |
+ Type::prelu_out(output, self, weight); | |
+ if (jit::tracer::isTracing( output, self, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "prelu_out", { output, self, weight }, {output} ); | |
+ (void)n; | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::prelu(const Tensor & self, const Tensor & weight) const { | |
profiler::RecordFunction profiler("prelu"); | |
auto output = Type::prelu(self, weight); | |
@@ -6620,6 +10728,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::prelu_forward_out(Tensor & output, const Tensor & self, const Tensor & weight) const { | |
+ profiler::RecordFunction profiler("prelu_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& weight_ = unpack(weight, "weight", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<PreluBackward> grad_fn; | |
+ if (compute_requires_grad({ self, weight })) { | |
+ check_output_args("prelu_forward_out", { output }); | |
+ grad_fn = std::make_shared<PreluBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ } | |
+ baseType->prelu_forward_out(output_, self_, weight_); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "prelu_forward_out", { output, self, weight }, {output} ); | |
+ (void)n; | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::prelu_forward(const Tensor & self, const Tensor & weight) const { | |
profiler::RecordFunction profiler("prelu_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6639,6 +10770,31 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::prelu_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight) const { | |
+ profiler::RecordFunction profiler("prelu_backward_out"); | |
+ auto grad_input_ = unpack_opt(grad_input, "grad_input", 0); | |
+ auto grad_weight_ = unpack_opt(grad_weight, "grad_weight", 1); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ auto& weight_ = unpack(weight, "weight", 4); | |
+ check_inplace(grad_input); | |
+ check_inplace(grad_weight); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, weight })) { | |
+ check_output_args("prelu_backward_out", { grad_input, grad_weight }); | |
+ grad_fn = std::make_shared<Error>("the derivative for prelu_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
+ } | |
+ baseType->prelu_backward_out(grad_input_, grad_weight_, grad_output_, self_, weight_); | |
+ increment_version(grad_input); | |
+ increment_version(grad_weight); | |
+ rebase_history({ grad_input, grad_weight }, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_weight, grad_output, self, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "prelu_backward_out", { grad_input, grad_weight, grad_output, self, weight }, {grad_input, grad_weight} ); | |
+ (void)n; | |
+ } | |
+ return std::forward_as_tuple(grad_input, grad_weight); | |
+} | |
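// prelu_backward_out is the one wrapper in this hunk with two differentiable
// outputs, so it uses the list form rebase_history({ grad_input, grad_weight }, grad_fn)
// after bumping both version counters. No double-backward formula is generated
// for it: grad_fn is an Error node, so the returned tuple can be used normally,
//
//   std::tuple<Tensor&, Tensor&> grads =   // hypothetical use of the return value
//       prelu_backward_out(grad_input, grad_weight, grad_output, self, weight);
//
// but any later attempt to backpropagate through grad_input or grad_weight hits
// "the derivative for prelu_backward_out is not implemented".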
std::tuple<Tensor,Tensor> VariableType::prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, std::array<bool,2> output_mask) const { | |
profiler::RecordFunction profiler("prelu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6661,11 +10817,39 @@ | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight)); | |
} | |
+Tensor & VariableType::rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
+ profiler::RecordFunction profiler("rrelu_with_noise_out"); | |
+ Type::rrelu_with_noise_out(output, self, noise, lower, upper, training, generator); | |
+ return output; | |
+} | |
Tensor VariableType::rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_with_noise"); | |
auto output = Type::rrelu_with_noise(self, noise, lower, upper, training, generator); | |
return output; | |
} | |
+Tensor & VariableType::rrelu_with_noise_forward_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
+ profiler::RecordFunction profiler("rrelu_with_noise_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& noise_ = unpack(noise, "noise", 2); | |
+ check_inplace(output); | |
+ check_no_requires_grad(noise, "noise"); | |
+ std::shared_ptr<RreluWithNoiseBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("rrelu_with_noise_forward_out", { output }); | |
+ grad_fn = std::make_shared<RreluWithNoiseBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->noise_ = SavedVariable(noise, false); | |
+ grad_fn->lower = lower; | |
+ grad_fn->upper = upper; | |
+ grad_fn->training = training; | |
+ } | |
+ baseType->rrelu_with_noise_forward_out(output_, self_, noise_, lower, upper, training, generator); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ return output; | |
+} | |
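// Neither rrelu_with_noise_out above nor rrelu_with_noise_forward_out records a
// jit::tracer trace, unlike every other wrapper in this hunk; the likely reason
// is the Generator* argument, whose RNG state has no representation in the
// trace. The autograd side is unaffected: RreluWithNoiseBackward0 still saves
// self, noise, lower, upper and training, and the history of `output` is rebased
// as usual.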
Tensor VariableType::rrelu_with_noise_forward(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_with_noise_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6685,6 +10869,36 @@ | |
set_history(output, grad_fn); | |
return output; | |
} | |
+Tensor & VariableType::rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const { | |
+ profiler::RecordFunction profiler("rrelu_with_noise_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& noise_ = unpack(noise, "noise", 3); | |
+ check_inplace(grad_input); | |
+ check_no_requires_grad(noise, "noise"); | |
+ std::shared_ptr<RreluWithNoiseBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("rrelu_with_noise_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<RreluWithNoiseBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->noise_ = SavedVariable(noise, false); | |
+ grad_fn->lower = lower; | |
+ grad_fn->upper = upper; | |
+ grad_fn->training = training; | |
+ } | |
+ baseType->rrelu_with_noise_backward_out(grad_input_, grad_output_, self_, noise_, lower, upper, training); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, noise )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "rrelu_with_noise_backward_out", { grad_input, grad_output, self, noise }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("lower"), lower); | |
+ setattr(n, jit::stringToSymbol("upper"), upper); | |
+ setattr(n, jit::stringToSymbol("training"), training); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const { | |
profiler::RecordFunction profiler("rrelu_with_noise_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6739,6 +10953,15 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::softmax_out(Tensor & output, const Tensor & self, int64_t dim) const { | |
+ profiler::RecordFunction profiler("softmax_out"); | |
+ Type::softmax_out(output, self, dim); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "softmax_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::softmax(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("softmax"); | |
auto output = Type::softmax(self, dim); | |
@@ -6748,6 +10971,31 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::softmax_forward_out(Tensor & output, const Tensor & self, int64_t dim) const { | |
+ profiler::RecordFunction profiler("softmax_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<SoftmaxBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("softmax_forward_out", { output }); | |
+ grad_fn = std::make_shared<SoftmaxBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ } | |
+ baseType->softmax_forward_out(output_, self_, dim); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "softmax_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->output_ = SavedVariable(output, true); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::softmax_forward(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("softmax_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6769,6 +11017,32 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::softmax_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const { | |
+ profiler::RecordFunction profiler("softmax_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& output_ = unpack(output, "output", 4); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<SoftmaxBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("softmax_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<SoftmaxBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->dim = dim; | |
+ grad_fn->output_ = SavedVariable(output, false); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ } | |
+ baseType->softmax_backward_out(grad_input_, grad_output_, self_, dim, output_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, output )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "softmax_backward_out", { grad_input, grad_output, self, output }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("dim"), dim); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::softmax_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const { | |
profiler::RecordFunction profiler("softmax_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6791,6 +11065,16 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const { | |
+ profiler::RecordFunction profiler("softplus_out"); | |
+ Type::softplus_out(output, self, beta, threshold); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "softplus_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("beta"), beta); | |
+ setattr(n, jit::stringToSymbol("threshold"), threshold); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::softplus(const Tensor & self, Scalar beta, Scalar threshold) const { | |
profiler::RecordFunction profiler("softplus"); | |
auto output = Type::softplus(self, beta, threshold); | |
@@ -6801,6 +11085,33 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::softplus_forward_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) const { | |
+ profiler::RecordFunction profiler("softplus_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<SoftplusBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("softplus_forward_out", { output }); | |
+ grad_fn = std::make_shared<SoftplusBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->beta = beta; | |
+ grad_fn->threshold = threshold; | |
+ } | |
+ baseType->softplus_forward_out(output_, self_, beta, threshold); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "softplus_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("beta"), beta); | |
+ setattr(n, jit::stringToSymbol("threshold"), threshold); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->output_ = SavedVariable(output, true); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::softplus_forward(const Tensor & self, Scalar beta, Scalar threshold) const { | |
profiler::RecordFunction profiler("softplus_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6824,6 +11135,34 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const { | |
+ profiler::RecordFunction profiler("softplus_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& output_ = unpack(output, "output", 5); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<SoftplusBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("softplus_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<SoftplusBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->beta = beta; | |
+ grad_fn->threshold = threshold; | |
+ grad_fn->output_ = SavedVariable(output, false); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ } | |
+ baseType->softplus_backward_out(grad_input_, grad_output_, self_, beta, threshold, output_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, output )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "softplus_backward_out", { grad_input, grad_output, self, output }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("beta"), beta); | |
+ setattr(n, jit::stringToSymbol("threshold"), threshold); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const { | |
profiler::RecordFunction profiler("softplus_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6848,6 +11187,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) const { | |
+ profiler::RecordFunction profiler("softshrink_out"); | |
+ Type::softshrink_out(output, self, lambd); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "softshrink_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("lambd"), lambd); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::softshrink(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("softshrink"); | |
auto output = Type::softshrink(self, lambd); | |
@@ -6857,6 +11205,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::softshrink_forward_out(Tensor & output, const Tensor & self, Scalar lambd) const { | |
+ profiler::RecordFunction profiler("softshrink_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<SoftshrinkBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("softshrink_forward_out", { output }); | |
+ grad_fn = std::make_shared<SoftshrinkBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->lambd = lambd; | |
+ } | |
+ baseType->softshrink_forward_out(output_, self_, lambd); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "softshrink_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("lambd"), lambd); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::softshrink_forward(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("softshrink_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6875,6 +11245,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) const { | |
+ profiler::RecordFunction profiler("softshrink_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<SoftshrinkBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("softshrink_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<SoftshrinkBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->lambd = lambd; | |
+ } | |
+ baseType->softshrink_backward_out(grad_input_, grad_output_, self_, lambd); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "softshrink_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("lambd"), lambd); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("softshrink_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6894,6 +11287,16 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::threshold_out(Tensor & output, const Tensor & self, Scalar threshold, Scalar value) const { | |
+ profiler::RecordFunction profiler("threshold_out"); | |
+ Type::threshold_out(output, self, threshold, value); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "threshold_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("threshold"), threshold); | |
+ setattr(n, jit::stringToSymbol("value"), value); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::threshold(const Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold"); | |
auto output = Type::threshold(self, threshold, value); | |
@@ -6904,6 +11307,30 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::threshold_forward_out(Tensor & output, const Tensor & self, Scalar threshold, Scalar value) const { | |
+ profiler::RecordFunction profiler("threshold_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<ThresholdBackward0> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("threshold_forward_out", { output }); | |
+ grad_fn = std::make_shared<ThresholdBackward0>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->threshold = threshold; | |
+ grad_fn->value = value; | |
+ } | |
+ baseType->threshold_forward_out(output_, self_, threshold, value); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "threshold_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("threshold"), threshold); | |
+ setattr(n, jit::stringToSymbol("value"), value); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::threshold_forward(const Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -6924,6 +11351,31 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::threshold_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar threshold, Scalar value) const { | |
+ profiler::RecordFunction profiler("threshold_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<ThresholdBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("threshold_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<ThresholdBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->threshold = threshold; | |
+ grad_fn->value = value; | |
+ } | |
+ baseType->threshold_backward_out(grad_input_, grad_output_, self_, threshold, value); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "threshold_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("threshold"), threshold); | |
+ setattr(n, jit::stringToSymbol("value"), value); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::threshold_backward(const Tensor & grad_output, const Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -6979,6 +11431,15 @@ | |
} | |
return self; | |
} | |
+Tensor & VariableType::adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("adaptive_avg_pool2d_out"); | |
+ Type::adaptive_avg_pool2d_out(output, self, output_size); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::adaptive_avg_pool2d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool2d"); | |
auto output = Type::adaptive_avg_pool2d(self, output_size); | |
@@ -6988,6 +11449,27 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::adaptive_avg_pool2d_forward_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("adaptive_avg_pool2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<AdaptiveAvgPool2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("adaptive_avg_pool2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<AdaptiveAvgPool2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->adaptive_avg_pool2d_forward_out(output_, self_, output_size); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::adaptive_avg_pool2d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7005,6 +11487,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::adaptive_avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("adaptive_avg_pool2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<AdaptiveAvgPool2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("adaptive_avg_pool2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<AdaptiveAvgPool2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->adaptive_avg_pool2d_backward_out(grad_input_, grad_output_, self_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ (void)n; | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7024,6 +11529,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("adaptive_avg_pool3d_out"); | |
+ Type::adaptive_avg_pool3d_out(output, self, output_size); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::adaptive_avg_pool3d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d"); | |
auto output = Type::adaptive_avg_pool3d(self, output_size); | |
@@ -7033,6 +11547,27 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::adaptive_avg_pool3d_forward_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("adaptive_avg_pool3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<AdaptiveAvgPool3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("adaptive_avg_pool3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<AdaptiveAvgPool3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->adaptive_avg_pool3d_forward_out(output_, self_, output_size); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::adaptive_avg_pool3d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7050,6 +11585,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("adaptive_avg_pool3d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<AdaptiveAvgPool3DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("adaptive_avg_pool3d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<AdaptiveAvgPool3DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->adaptive_avg_pool3d_backward_out(grad_input_, grad_output_, self_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ (void)n; | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7069,6 +11627,15 @@ | |
} | |
return grad_input; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::adaptive_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("adaptive_max_pool2d_out"); | |
+ Type::adaptive_max_pool2d_out(output, indices, self, output_size); | |
+ if (jit::tracer::isTracing( output, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_out", { output, indices, self }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool2d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool2d"); | |
Tensor output, indices; | |
@@ -7079,6 +11646,31 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
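+// Max-pooling forward wrappers also produce an integer `indices` tensor. When | |
+// a grad_fn was created, `indices` is saved only after the kernel and trace | |
+// have run, with the second SavedVariable argument set to true because it is | |
+// an output of this node. | |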
+std::tuple<Tensor &,Tensor &> VariableType::adaptive_max_pool2d_forward_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("adaptive_max_pool2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<AdaptiveMaxPool2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("adaptive_max_pool2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<AdaptiveMaxPool2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->adaptive_max_pool2d_forward_out(output_, indices_, self_, output_size); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_forward_out", { output, indices, self }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool2d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7100,6 +11692,31 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
+Tensor & VariableType::adaptive_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) const { | |
+ profiler::RecordFunction profiler("adaptive_max_pool2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& indices_ = unpack_long(indices, "indices", 3); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<AdaptiveMaxPool2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("adaptive_max_pool2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<AdaptiveMaxPool2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->indices_ = SavedVariable(indices, false); | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->adaptive_max_pool2d_backward_out(grad_input_, grad_output_, self_, indices_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_backward_out", { grad_input, grad_output, self, indices }, {grad_input} ); | |
+ (void)n; | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const { | |
profiler::RecordFunction profiler("adaptive_max_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7121,6 +11738,15 @@ | |
} | |
return grad_input; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::adaptive_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("adaptive_max_pool3d_out"); | |
+ Type::adaptive_max_pool3d_out(output, indices, self, output_size); | |
+ if (jit::tracer::isTracing( output, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_out", { output, indices, self }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool3d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool3d"); | |
Tensor output, indices; | |
@@ -7131,6 +11757,31 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::adaptive_max_pool3d_forward_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("adaptive_max_pool3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<AdaptiveMaxPool3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("adaptive_max_pool3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<AdaptiveMaxPool3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ } | |
+ baseType->adaptive_max_pool3d_forward_out(output_, indices_, self_, output_size); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_forward_out", { output, indices, self }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool3d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7152,6 +11803,31 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
+Tensor & VariableType::adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) const { | |
+ profiler::RecordFunction profiler("adaptive_max_pool3d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& indices_ = unpack_long(indices, "indices", 3); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<AdaptiveMaxPool3DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("adaptive_max_pool3d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<AdaptiveMaxPool3DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->indices_ = SavedVariable(indices, false); | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->adaptive_max_pool3d_backward_out(grad_input_, grad_output_, self_, indices_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_backward_out", { grad_input, grad_output, self, indices }, {grad_input} ); | |
+ (void)n; | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const { | |
profiler::RecordFunction profiler("adaptive_max_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7173,6 +11849,19 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::avg_pool2d_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
+ profiler::RecordFunction profiler("avg_pool2d_out"); | |
+ Type::avg_pool2d_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::avg_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool2d"); | |
auto output = Type::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
@@ -7186,6 +11875,36 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::avg_pool2d_forward_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
+ profiler::RecordFunction profiler("avg_pool2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<AvgPool2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("avg_pool2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<AvgPool2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->ceil_mode = ceil_mode; | |
+ grad_fn->count_include_pad = count_include_pad; | |
+ } | |
+ baseType->avg_pool2d_forward_out(output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::avg_pool2d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7212,6 +11931,37 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
+ profiler::RecordFunction profiler("avg_pool2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<AvgPool2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("avg_pool2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<AvgPool2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->ceil_mode = ceil_mode; | |
+ grad_fn->count_include_pad = count_include_pad; | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->avg_pool2d_backward_out(grad_input_, grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7239,6 +11989,19 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::avg_pool3d_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
+ profiler::RecordFunction profiler("avg_pool3d_out"); | |
+ Type::avg_pool3d_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::avg_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d"); | |
auto output = Type::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
@@ -7252,6 +12015,36 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::avg_pool3d_forward_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
+ profiler::RecordFunction profiler("avg_pool3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<AvgPool3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("avg_pool3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<AvgPool3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->ceil_mode = ceil_mode; | |
+ grad_fn->count_include_pad = count_include_pad; | |
+ } | |
+ baseType->avg_pool3d_forward_out(output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::avg_pool3d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7278,6 +12071,37 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
+ profiler::RecordFunction profiler("avg_pool3d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<AvgPool3DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("avg_pool3d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<AvgPool3DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->ceil_mode = ceil_mode; | |
+ grad_fn->count_include_pad = count_include_pad; | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->avg_pool3d_backward_out(grad_input_, grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7305,6 +12129,16 @@ | |
} | |
return grad_input; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) const { | |
+ profiler::RecordFunction profiler("fractional_max_pool2d_out"); | |
+ Type::fractional_max_pool2d_out(output, indices, self, kernel_size, output_size, random_samples); | |
+ if (jit::tracer::isTracing( output, indices, self, random_samples )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_out", { output, indices, self, random_samples }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::fractional_max_pool2d(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) const { | |
profiler::RecordFunction profiler("fractional_max_pool2d"); | |
Tensor output, indices; | |
@@ -7316,6 +12150,36 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
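+// fractional_max_pool2d additionally takes `random_samples`, for which no | |
+// derivative formula exists, so check_no_requires_grad() rejects a | |
+// random_samples argument that requires grad. | |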
+std::tuple<Tensor &,Tensor &> VariableType::fractional_max_pool2d_forward_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) const { | |
+ profiler::RecordFunction profiler("fractional_max_pool2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& random_samples_ = unpack(random_samples, "random_samples", 5); | |
+ check_inplace(output); | |
+ check_no_requires_grad(random_samples, "random_samples"); | |
+ std::shared_ptr<FractionalMaxPool2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("fractional_max_pool2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<FractionalMaxPool2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->output_size = output_size; | |
+ } | |
+ baseType->fractional_max_pool2d_forward_out(output_, indices_, self_, kernel_size, output_size, random_samples_); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, indices, self, random_samples )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_forward_out", { output, indices, self, random_samples }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::fractional_max_pool2d_forward(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) const { | |
profiler::RecordFunction profiler("fractional_max_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7342,6 +12206,32 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
+Tensor & VariableType::fractional_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) const { | |
+ profiler::RecordFunction profiler("fractional_max_pool2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& indices_ = unpack_long(indices, "indices", 5); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<FractionalMaxPool2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("fractional_max_pool2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<FractionalMaxPool2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->indices_ = SavedVariable(indices, false); | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->fractional_max_pool2d_backward_out(grad_input_, grad_output_, self_, kernel_size, output_size, indices_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_backward_out", { grad_input, grad_output, self, indices }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) const { | |
profiler::RecordFunction profiler("fractional_max_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7364,6 +12254,19 @@ | |
} | |
return grad_input; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
+ profiler::RecordFunction profiler("max_pool2d_out"); | |
+ Type::max_pool2d_out(output, indices, self, kernel_size, stride, padding, dilation, ceil_mode); | |
+ if (jit::tracer::isTracing( output, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_pool2d_out", { output, indices, self }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::max_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool2d"); | |
Tensor output, indices; | |
@@ -7378,6 +12281,40 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::max_pool2d_forward_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
+ profiler::RecordFunction profiler("max_pool2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<MaxPool2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("max_pool2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<MaxPool2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->dilation = dilation; | |
+ grad_fn->ceil_mode = ceil_mode; | |
+ } | |
+ baseType->max_pool2d_forward_out(output_, indices_, self_, kernel_size, stride, padding, dilation, ceil_mode); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_pool2d_forward_out", { output, indices, self }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::max_pool2d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7408,6 +12345,34 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
+Tensor & VariableType::max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const { | |
+ profiler::RecordFunction profiler("max_pool2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& indices_ = unpack_long(indices, "indices", 8); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<MaxPool2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("max_pool2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<MaxPool2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->indices_ = SavedVariable(indices, false); | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->max_pool2d_backward_out(grad_input_, grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_pool2d_backward_out", { grad_input, grad_output, self, indices }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const { | |
profiler::RecordFunction profiler("max_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7432,6 +12397,19 @@ | |
} | |
return grad_input; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
+ profiler::RecordFunction profiler("max_pool3d_out"); | |
+ Type::max_pool3d_out(output, indices, self, kernel_size, stride, padding, dilation, ceil_mode); | |
+ if (jit::tracer::isTracing( output, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_pool3d_out", { output, indices, self }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::max_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool3d"); | |
Tensor output, indices; | |
@@ -7446,6 +12424,40 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::max_pool3d_forward_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
+ profiler::RecordFunction profiler("max_pool3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& indices_ = unpack_long(indices, "indices", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<MaxPool3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("max_pool3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<MaxPool3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->dilation = dilation; | |
+ grad_fn->ceil_mode = ceil_mode; | |
+ } | |
+ baseType->max_pool3d_forward_out(output_, indices_, self_, kernel_size, stride, padding, dilation, ceil_mode); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, indices, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_pool3d_forward_out", { output, indices, self }, {output, indices} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->indices_ = SavedVariable(indices, true); | |
+ } | |
+ return std::forward_as_tuple(output, indices); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::max_pool3d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7476,16 +12488,41 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
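+// The double backward of max_pool3d is not implemented, so this wrapper (like | |
+// the existing max_pool3d_backward below) installs an Error node that throws | |
+// if the graph is differentiated again. `indices` is also dropped from | |
+// compute_requires_grad/compute_next_functions, since an integer index tensor | |
+// is never differentiable. | |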
+Tensor & VariableType::max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const { | |
+ profiler::RecordFunction profiler("max_pool3d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& indices_ = unpack_long(indices, "indices", 8); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("max_pool3d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<Error>("the derivative for max_pool3d_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ } | |
+ baseType->max_pool3d_backward_out(grad_input_, grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_pool3d_backward_out", { grad_input, grad_output, self, indices }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const { | |
profiler::RecordFunction profiler("max_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 7); | |
std::shared_ptr<Error> grad_fn; | |
- if (compute_requires_grad({ grad_output, self, indices })) { | |
+ if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<Error>("the derivative for max_pool3d_backward is not implemented"); | |
- grad_fn->next_functions = compute_next_functions({ grad_output, self, indices }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
} | |
auto grad_input = as_variable(baseType->max_pool3d_backward(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_)); | |
set_history(grad_input, grad_fn); | |
@@ -7499,6 +12536,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size) const { | |
+ profiler::RecordFunction profiler("max_unpool2d_out"); | |
+ Type::max_unpool2d_out(output, self, indices, output_size); | |
+ if (jit::tracer::isTracing( output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_out", { output, self, indices }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::max_unpool2d(const Tensor & self, const Tensor & indices, IntList output_size) const { | |
profiler::RecordFunction profiler("max_unpool2d"); | |
auto output = Type::max_unpool2d(self, indices, output_size); | |
@@ -7508,6 +12554,30 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::max_unpool2d_forward_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size) const { | |
+ profiler::RecordFunction profiler("max_unpool2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& indices_ = unpack_long(indices, "indices", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<MaxUnpool2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("max_unpool2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<MaxUnpool2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->indices_ = SavedVariable(indices, false); | |
+ grad_fn->output_size = output_size; | |
+ } | |
+ baseType->max_unpool2d_forward_out(output_, self_, indices_, output_size); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_forward_out", { output, self, indices }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::max_unpool2d_forward(const Tensor & self, const Tensor & indices, IntList output_size) const { | |
profiler::RecordFunction profiler("max_unpool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7528,6 +12598,31 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) const { | |
+ profiler::RecordFunction profiler("max_unpool2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& indices_ = unpack_long(indices, "indices", 3); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<MaxUnpool2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("max_unpool2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<MaxUnpool2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->indices_ = SavedVariable(indices, false); | |
+ grad_fn->output_size = output_size; | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->max_unpool2d_backward_out(grad_input_, grad_output_, self_, indices_, output_size); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_backward_out", { grad_input, grad_output, self, indices }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) const { | |
profiler::RecordFunction profiler("max_unpool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7549,6 +12644,17 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
+ profiler::RecordFunction profiler("max_unpool3d_out"); | |
+ Type::max_unpool3d_out(output, self, indices, output_size, stride, padding); | |
+ if (jit::tracer::isTracing( output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_out", { output, self, indices }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::max_unpool3d(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("max_unpool3d"); | |
auto output = Type::max_unpool3d(self, indices, output_size, stride, padding); | |
@@ -7560,6 +12666,34 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::max_unpool3d_forward_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
+ profiler::RecordFunction profiler("max_unpool3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& indices_ = unpack_long(indices, "indices", 2); | |
+ check_inplace(output); | |
+ std::shared_ptr<MaxUnpool3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("max_unpool3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<MaxUnpool3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->indices_ = SavedVariable(indices, false); | |
+ grad_fn->output_size = output_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ } | |
+ baseType->max_unpool3d_forward_out(output_, self_, indices_, output_size, stride, padding); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_forward_out", { output, self, indices }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::max_unpool3d_forward(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("max_unpool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7584,16 +12718,39 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
+ profiler::RecordFunction profiler("max_unpool3d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ auto& indices_ = unpack_long(indices, "indices", 3); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("max_unpool3d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<Error>("the derivative for max_unpool3d_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ } | |
+ baseType->max_unpool3d_backward_out(grad_input_, grad_output_, self_, indices_, output_size, stride, padding); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self, indices )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_backward_out", { grad_input, grad_output, self, indices }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("max_unpool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 2); | |
std::shared_ptr<Error> grad_fn; | |
- if (compute_requires_grad({ grad_output, self, indices })) { | |
+ if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<Error>("the derivative for max_unpool3d_backward is not implemented"); | |
- grad_fn->next_functions = compute_next_functions({ grad_output, self, indices }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
} | |
auto grad_input = as_variable(baseType->max_unpool3d_backward(grad_output_, self_, indices_, output_size, stride, padding)); | |
set_history(grad_input, grad_fn); | |
@@ -7605,6 +12762,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::reflection_pad1d_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("reflection_pad1d_out"); | |
+ Type::reflection_pad1d_out(output, self, padding); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
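+// The plain *_out convenience wrappers (reflection_pad1d_out, replication_pad*_out, | |
+// upsample_*_out, ...) add no autograd bookkeeping of their own: they delegate to | |
+// the Type:: implementation and only record the trace; the grad_fn setup lives in | |
+// the matching *_forward_out overloads. A hypothetical caller-side sketch, not part | |
+// of this diff and assuming the ATen Type/Tensor API of this era: | |
+//   Tensor out = self.type().tensor();               // preallocated destination | |
+//   self.type().reflection_pad1d_out(out, self, {2, 2}); | |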
Tensor VariableType::reflection_pad1d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad1d"); | |
auto output = Type::reflection_pad1d(self, padding); | |
@@ -7614,6 +12780,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::reflection_pad1d_forward_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("reflection_pad1d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<ReflectionPad1DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("reflection_pad1d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ReflectionPad1DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->padding = padding; | |
+ } | |
+ baseType->reflection_pad1d_forward_out(output_, self_, padding); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::reflection_pad1d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad1d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7632,6 +12820,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("reflection_pad1d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<ReflectionPad1DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("reflection_pad1d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<ReflectionPad1DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->padding = padding; | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->reflection_pad1d_backward_out(grad_input_, grad_output_, self_, padding); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7651,6 +12862,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::reflection_pad2d_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("reflection_pad2d_out"); | |
+ Type::reflection_pad2d_out(output, self, padding); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::reflection_pad2d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d"); | |
auto output = Type::reflection_pad2d(self, padding); | |
@@ -7660,6 +12880,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::reflection_pad2d_forward_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("reflection_pad2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<ReflectionPad2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("reflection_pad2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ReflectionPad2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->padding = padding; | |
+ } | |
+ baseType->reflection_pad2d_forward_out(output_, self_, padding); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::reflection_pad2d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7678,6 +12920,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("reflection_pad2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<ReflectionPad2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("reflection_pad2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<ReflectionPad2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->padding = padding; | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->reflection_pad2d_backward_out(grad_input_, grad_output_, self_, padding); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7697,6 +12962,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::replication_pad1d_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("replication_pad1d_out"); | |
+ Type::replication_pad1d_out(output, self, padding); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::replication_pad1d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad1d"); | |
auto output = Type::replication_pad1d(self, padding); | |
@@ -7706,6 +12980,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::replication_pad1d_forward_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("replication_pad1d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<ReplicationPad1DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("replication_pad1d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ReplicationPad1DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->padding = padding; | |
+ } | |
+ baseType->replication_pad1d_forward_out(output_, self_, padding); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::replication_pad1d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad1d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7724,6 +13020,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("replication_pad1d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<ReplicationPad1DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("replication_pad1d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<ReplicationPad1DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->padding = padding; | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->replication_pad1d_backward_out(grad_input_, grad_output_, self_, padding); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7743,6 +13062,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::replication_pad2d_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("replication_pad2d_out"); | |
+ Type::replication_pad2d_out(output, self, padding); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::replication_pad2d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad2d"); | |
auto output = Type::replication_pad2d(self, padding); | |
@@ -7752,6 +13080,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::replication_pad2d_forward_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("replication_pad2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<ReplicationPad2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("replication_pad2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ReplicationPad2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->padding = padding; | |
+ } | |
+ baseType->replication_pad2d_forward_out(output_, self_, padding); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::replication_pad2d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7770,6 +13120,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("replication_pad2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<ReplicationPad2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("replication_pad2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<ReplicationPad2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->padding = padding; | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->replication_pad2d_backward_out(grad_input_, grad_output_, self_, padding); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7789,6 +13162,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::replication_pad3d_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("replication_pad3d_out"); | |
+ Type::replication_pad3d_out(output, self, padding); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::replication_pad3d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad3d"); | |
auto output = Type::replication_pad3d(self, padding); | |
@@ -7798,6 +13180,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::replication_pad3d_forward_out(Tensor & output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("replication_pad3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<ReplicationPad3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("replication_pad3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ReplicationPad3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->padding = padding; | |
+ } | |
+ baseType->replication_pad3d_forward_out(output_, self_, padding); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::replication_pad3d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7816,6 +13220,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
+ profiler::RecordFunction profiler("replication_pad3d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<ReplicationPad3DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("replication_pad3d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<ReplicationPad3DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->padding = padding; | |
+ grad_fn->self_info = self; | |
+ } | |
+ baseType->replication_pad3d_backward_out(grad_input_, grad_output_, self_, padding); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7835,6 +13262,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::upsample_linear1d_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("upsample_linear1d_out"); | |
+ Type::upsample_linear1d_out(output, self, output_size); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_linear1d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_linear1d"); | |
auto output = Type::upsample_linear1d(self, output_size); | |
@@ -7844,6 +13280,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_linear1d_forward_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("upsample_linear1d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<UpsampleLinear1DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("upsample_linear1d_forward_out", { output }); | |
+ grad_fn = std::make_shared<UpsampleLinear1DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->output_size = output_size; | |
+ } | |
+ baseType->upsample_linear1d_forward_out(output_, self_, output_size); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_linear1d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_linear1d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7862,6 +13320,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
+ profiler::RecordFunction profiler("upsample_linear1d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<UpsampleLinear1DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output })) { | |
+ check_output_args("upsample_linear1d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<UpsampleLinear1DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output }); | |
+ grad_fn->output_size = output_size; | |
+ } | |
+ baseType->upsample_linear1d_backward_out(grad_input_, grad_output_, output_size, input_size); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_backward_out", { grad_input, grad_output }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ setattr(n, jit::stringToSymbol("input_size"), input_size); | |
+ } | |
+ return grad_input; | |
+} | |
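+// The upsample_*_backward_out wrappers treat only grad_output as differentiable: | |
+// output_size and input_size are plain IntList metadata, so the *BackwardBackward | |
+// node records just the sizes plus next_functions computed from grad_output. | |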
Tensor VariableType::upsample_linear1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
profiler::RecordFunction profiler("upsample_linear1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7880,6 +13360,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("upsample_bilinear2d_out"); | |
+ Type::upsample_bilinear2d_out(output, self, output_size); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_bilinear2d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_bilinear2d"); | |
auto output = Type::upsample_bilinear2d(self, output_size); | |
@@ -7889,6 +13378,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_bilinear2d_forward_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("upsample_bilinear2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<UpsampleBilinear2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("upsample_bilinear2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<UpsampleBilinear2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->output_size = output_size; | |
+ } | |
+ baseType->upsample_bilinear2d_forward_out(output_, self_, output_size); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_bilinear2d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_bilinear2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7907,6 +13418,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
+ profiler::RecordFunction profiler("upsample_bilinear2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<UpsampleBilinear2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output })) { | |
+ check_output_args("upsample_bilinear2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<UpsampleBilinear2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output }); | |
+ grad_fn->output_size = output_size; | |
+ } | |
+ baseType->upsample_bilinear2d_backward_out(grad_input_, grad_output_, output_size, input_size); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_backward_out", { grad_input, grad_output }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ setattr(n, jit::stringToSymbol("input_size"), input_size); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::upsample_bilinear2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
profiler::RecordFunction profiler("upsample_bilinear2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7925,6 +13458,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("upsample_trilinear3d_out"); | |
+ Type::upsample_trilinear3d_out(output, self, output_size); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_trilinear3d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_trilinear3d"); | |
auto output = Type::upsample_trilinear3d(self, output_size); | |
@@ -7934,6 +13476,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_trilinear3d_forward_out(Tensor & output, const Tensor & self, IntList output_size) const { | |
+ profiler::RecordFunction profiler("upsample_trilinear3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<UpsampleTrilinear3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("upsample_trilinear3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<UpsampleTrilinear3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_sizes = self.sizes(); | |
+ grad_fn->output_size = output_size; | |
+ } | |
+ baseType->upsample_trilinear3d_forward_out(output_, self_, output_size); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_trilinear3d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_trilinear3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7952,6 +13516,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
+ profiler::RecordFunction profiler("upsample_trilinear3d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<UpsampleTrilinear3DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output })) { | |
+ check_output_args("upsample_trilinear3d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<UpsampleTrilinear3DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output }); | |
+ grad_fn->output_size = output_size; | |
+ } | |
+ baseType->upsample_trilinear3d_backward_out(grad_input_, grad_output_, output_size, input_size); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_backward_out", { grad_input, grad_output }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("output_size"), output_size); | |
+ setattr(n, jit::stringToSymbol("input_size"), input_size); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::upsample_trilinear3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
profiler::RecordFunction profiler("upsample_trilinear3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -7970,6 +13556,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::upsample_nearest1d_out(Tensor & output, const Tensor & self, int64_t scale_factor) const { | |
+ profiler::RecordFunction profiler("upsample_nearest1d_out"); | |
+ Type::upsample_nearest1d_out(output, self, scale_factor); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_nearest1d(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest1d"); | |
auto output = Type::upsample_nearest1d(self, scale_factor); | |
@@ -7979,6 +13574,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_nearest1d_forward_out(Tensor & output, const Tensor & self, int64_t scale_factor) const { | |
+ profiler::RecordFunction profiler("upsample_nearest1d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<UpsampleNearest1DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("upsample_nearest1d_forward_out", { output }); | |
+ grad_fn = std::make_shared<UpsampleNearest1DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->scale_factor = scale_factor; | |
+ } | |
+ baseType->upsample_nearest1d_forward_out(output_, self_, scale_factor); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_nearest1d_forward(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest1d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -7997,6 +13614,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
+ profiler::RecordFunction profiler("upsample_nearest1d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<UpsampleNearest1DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("upsample_nearest1d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<UpsampleNearest1DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->scale_factor = scale_factor; | |
+ } | |
+ baseType->upsample_nearest1d_backward_out(grad_input_, grad_output_, self_, scale_factor); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::upsample_nearest1d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8015,6 +13654,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::upsample_nearest2d_out(Tensor & output, const Tensor & self, int64_t scale_factor) const { | |
+ profiler::RecordFunction profiler("upsample_nearest2d_out"); | |
+ Type::upsample_nearest2d_out(output, self, scale_factor); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_nearest2d(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest2d"); | |
auto output = Type::upsample_nearest2d(self, scale_factor); | |
@@ -8024,6 +13672,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_nearest2d_forward_out(Tensor & output, const Tensor & self, int64_t scale_factor) const { | |
+ profiler::RecordFunction profiler("upsample_nearest2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<UpsampleNearest2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("upsample_nearest2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<UpsampleNearest2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->scale_factor = scale_factor; | |
+ } | |
+ baseType->upsample_nearest2d_forward_out(output_, self_, scale_factor); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_nearest2d_forward(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8042,6 +13712,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
+ profiler::RecordFunction profiler("upsample_nearest2d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<UpsampleNearest2DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("upsample_nearest2d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<UpsampleNearest2DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->scale_factor = scale_factor; | |
+ } | |
+ baseType->upsample_nearest2d_backward_out(grad_input_, grad_output_, self_, scale_factor); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::upsample_nearest2d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8060,6 +13752,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::upsample_nearest3d_out(Tensor & output, const Tensor & self, int64_t scale_factor) const { | |
+ profiler::RecordFunction profiler("upsample_nearest3d_out"); | |
+ Type::upsample_nearest3d_out(output, self, scale_factor); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_nearest3d(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest3d"); | |
auto output = Type::upsample_nearest3d(self, scale_factor); | |
@@ -8069,6 +13770,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_nearest3d_forward_out(Tensor & output, const Tensor & self, int64_t scale_factor) const { | |
+ profiler::RecordFunction profiler("upsample_nearest3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<UpsampleNearest3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("upsample_nearest3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<UpsampleNearest3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->scale_factor = scale_factor; | |
+ } | |
+ baseType->upsample_nearest3d_forward_out(output_, self_, scale_factor); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_forward_out", { output, self }, {output} ); | |
+ setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::upsample_nearest3d_forward(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8087,6 +13810,28 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
+ profiler::RecordFunction profiler("upsample_nearest3d_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& self_ = unpack(self, "self", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<UpsampleNearest3DBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self })) { | |
+ check_output_args("upsample_nearest3d_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<UpsampleNearest3DBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
+ grad_fn->scale_factor = scale_factor; | |
+ } | |
+ baseType->upsample_nearest3d_backward_out(grad_input_, grad_output_, self_, scale_factor); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_backward_out", { grad_input, grad_output, self }, {grad_input} ); | |
+ setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::upsample_nearest3d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8105,6 +13850,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::_sigmoid_out(Tensor & output, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("_sigmoid_out"); | |
+ Type::_sigmoid_out(output, self); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "_sigmoid_out", { output, self }, {output} ); | |
+ (void)n; | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::_sigmoid(const Tensor & self) const { | |
profiler::RecordFunction profiler("_sigmoid"); | |
auto output = Type::_sigmoid(self); | |
@@ -8114,6 +13868,26 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::_sigmoid_forward_out(Tensor & output, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("_sigmoid_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("_sigmoid_forward_out", { output }); | |
+ grad_fn = std::make_shared<Error>("the derivative for _sigmoid_forward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->_sigmoid_forward_out(output_, self_); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "_sigmoid_forward_out", { output, self }, {output} ); | |
+ (void)n; | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::_sigmoid_forward(const Tensor & self) const { | |
profiler::RecordFunction profiler("_sigmoid_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8131,6 +13904,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const { | |
+ profiler::RecordFunction profiler("_sigmoid_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& output_ = unpack(output, "output", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<SigmoidBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, output })) { | |
+ check_output_args("_sigmoid_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<SigmoidBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, output }); | |
+ grad_fn->output_ = SavedVariable(output, false); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ } | |
+ baseType->_sigmoid_backward_out(grad_input_, grad_output_, output_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, output )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "_sigmoid_backward_out", { grad_input, grad_output, output }, {grad_input} ); | |
+ (void)n; | |
+ } | |
+ return grad_input; | |
+} | |
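+// SigmoidBackwardBackward (and TanhBackwardBackward below) need both the saved | |
+// forward output and grad_output to form the double-backward, so each is captured | |
+// with SavedVariable(..., false), i.e. saved as an input of the new node. | |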
Tensor VariableType::_sigmoid_backward(const Tensor & grad_output, const Tensor & output) const { | |
profiler::RecordFunction profiler("_sigmoid_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8150,6 +13946,15 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::_tanh_out(Tensor & output, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("_tanh_out"); | |
+ Type::_tanh_out(output, self); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "_tanh_out", { output, self }, {output} ); | |
+ (void)n; | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::_tanh(const Tensor & self) const { | |
profiler::RecordFunction profiler("_tanh"); | |
auto output = Type::_tanh(self); | |
@@ -8159,6 +13964,26 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::_tanh_forward_out(Tensor & output, const Tensor & self) const { | |
+ profiler::RecordFunction profiler("_tanh_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ check_inplace(output); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ self })) { | |
+ check_output_args("_tanh_forward_out", { output }); | |
+ grad_fn = std::make_shared<Error>("the derivative for _tanh_forward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ self }); | |
+ } | |
+ baseType->_tanh_forward_out(output_, self_); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "_tanh_forward_out", { output, self }, {output} ); | |
+ (void)n; | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::_tanh_forward(const Tensor & self) const { | |
profiler::RecordFunction profiler("_tanh_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8176,6 +14000,29 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::_tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) const { | |
+ profiler::RecordFunction profiler("_tanh_backward_out"); | |
+ auto& grad_input_ = unpack(grad_input, "grad_input", 0); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
+ auto& output_ = unpack(output, "output", 2); | |
+ check_inplace(grad_input); | |
+ std::shared_ptr<TanhBackwardBackward> grad_fn; | |
+ if (compute_requires_grad({ grad_output, output })) { | |
+ check_output_args("_tanh_backward_out", { grad_input }); | |
+ grad_fn = std::make_shared<TanhBackwardBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, output }); | |
+ grad_fn->output_ = SavedVariable(output, false); | |
+ grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
+ } | |
+ baseType->_tanh_backward_out(grad_input_, grad_output_, output_); | |
+ increment_version(grad_input); | |
+ rebase_history(grad_input, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_output, output )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "_tanh_backward_out", { grad_input, grad_output, output }, {grad_input} ); | |
+ (void)n; | |
+ } | |
+ return grad_input; | |
+} | |
Tensor VariableType::_tanh_backward(const Tensor & grad_output, const Tensor & output) const { | |
profiler::RecordFunction profiler("_tanh_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8195,6 +14042,17 @@ | |
} | |
return grad_input; | |
} | |
+Tensor & VariableType::thnn_batch_norm_out(Tensor & output, const Tensor & self, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) const { | |
+ profiler::RecordFunction profiler("thnn_batch_norm_out"); | |
+ Type::thnn_batch_norm_out(output, self, weight, bias, running_mean, running_var, training, momentum, eps); | |
+ if (jit::tracer::isTracing( output, self, weight, bias, running_mean, running_var )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_out", { output, self, weight, bias, running_mean, running_var }, {output} ); | |
+ setattr(n, jit::stringToSymbol("training"), training); | |
+ setattr(n, jit::stringToSymbol("momentum"), momentum); | |
+ setattr(n, jit::stringToSymbol("eps"), eps); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::thnn_batch_norm(const Tensor & self, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) const { | |
profiler::RecordFunction profiler("thnn_batch_norm"); | |
auto output = Type::thnn_batch_norm(self, weight, bias, running_mean, running_var, training, momentum, eps); | |
@@ -8206,6 +14064,46 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_batch_norm_forward_out(Tensor & output, Tensor & save_mean, Tensor & save_std, const Tensor & self, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) const { | |
+ profiler::RecordFunction profiler("thnn_batch_norm_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& save_mean_ = unpack(save_mean, "save_mean", 1); | |
+ auto& save_std_ = unpack(save_std, "save_std", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ auto weight_ = unpack_opt(weight, "weight", 4); | |
+ auto bias_ = unpack_opt(bias, "bias", 5); | |
+ auto& running_mean_ = unpack(running_mean, "running_mean", 6); | |
+ auto& running_var_ = unpack(running_var, "running_var", 7); | |
+ check_inplace(output); | |
+ check_no_requires_grad(running_mean, "running_mean"); | |
+ check_no_requires_grad(running_var, "running_var"); | |
+ std::shared_ptr<ThnnBatchNormBackward> grad_fn; | |
+ if (compute_requires_grad({ self, weight, bias })) { | |
+ check_output_args("thnn_batch_norm_forward_out", { output }); | |
+ grad_fn = std::make_shared<ThnnBatchNormBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->running_mean_ = SavedVariable(running_mean, false); | |
+ grad_fn->running_var_ = SavedVariable(running_var, false); | |
+ grad_fn->training = training; | |
+ grad_fn->eps = eps; | |
+ } | |
+ baseType->thnn_batch_norm_forward_out(output_, save_mean_, save_std_, self_, weight_, bias_, running_mean_, running_var_, training, momentum, eps); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, save_mean, save_std, self, weight, bias, running_mean, running_var )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_forward_out", { output, save_mean, save_std, self, weight, bias, running_mean, running_var }, {output, save_mean, save_std} ); | |
+ setattr(n, jit::stringToSymbol("training"), training); | |
+ setattr(n, jit::stringToSymbol("momentum"), momentum); | |
+ setattr(n, jit::stringToSymbol("eps"), eps); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->save_mean_ = SavedVariable(save_mean, true); | |
+ grad_fn->save_std_ = SavedVariable(save_std, true); | |
+ } | |
+ return std::forward_as_tuple(output, save_mean, save_std); | |
+} | |
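+// Multi-output forward_out wrappers return their destinations by reference via | |
+// std::forward_as_tuple. save_mean/save_std are captured into the grad_fn only | |
+// after the base call, with SavedVariable(..., true) marking them as outputs of | |
+// this node rather than inputs. | |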
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_batch_norm_forward(const Tensor & self, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) const { | |
profiler::RecordFunction profiler("thnn_batch_norm_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8241,6 +14139,39 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(save_mean), std::move(save_std)); | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_batch_norm_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, bool training, double eps, const Tensor & save_mean, const Tensor & save_std) const { | |
+ profiler::RecordFunction profiler("thnn_batch_norm_backward_out"); | |
+ auto grad_input_ = unpack_opt(grad_input, "grad_input", 0); | |
+ auto grad_weight_ = unpack_opt(grad_weight, "grad_weight", 1); | |
+ auto grad_bias_ = unpack_opt(grad_bias, "grad_bias", 2); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 3); | |
+ auto& self_ = unpack(self, "self", 4); | |
+ auto weight_ = unpack_opt(weight, "weight", 5); | |
+ auto& running_mean_ = unpack(running_mean, "running_mean", 6); | |
+ auto& running_var_ = unpack(running_var, "running_var", 7); | |
+ auto& save_mean_ = unpack(save_mean, "save_mean", 10); | |
+ auto& save_std_ = unpack(save_std, "save_std", 11); | |
+ check_inplace(grad_input); | |
+ check_inplace(grad_weight); | |
+ check_inplace(grad_bias); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, weight, running_mean, running_var, save_mean, save_std })) { | |
+ check_output_args("thnn_batch_norm_backward_out", { grad_input, grad_weight, grad_bias }); | |
+ grad_fn = std::make_shared<Error>("the derivative for thnn_batch_norm_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, running_mean, running_var, save_mean, save_std }); | |
+ } | |
+ baseType->thnn_batch_norm_backward_out(grad_input_, grad_weight_, grad_bias_, grad_output_, self_, weight_, running_mean_, running_var_, training, eps, save_mean_, save_std_); | |
+ increment_version(grad_input); | |
+ increment_version(grad_weight); | |
+ increment_version(grad_bias); | |
+ rebase_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_weight, grad_bias, grad_output, self, weight, running_mean, running_var, save_mean, save_std )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_backward_out", { grad_input, grad_weight, grad_bias, grad_output, self, weight, running_mean, running_var, save_mean, save_std }, {grad_input, grad_weight, grad_bias} ); | |
+ setattr(n, jit::stringToSymbol("training"), training); | |
+ setattr(n, jit::stringToSymbol("eps"), eps); | |
+ } | |
+ return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
+} | |
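+// With several differentiable destinations, the TensorList form of | |
+// rebase_history() is called on { grad_input, grad_weight, grad_bias } after each | |
+// of the three is version-bumped, and check_output_args() covers all three | |
+// out= arguments. | |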
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_batch_norm_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, bool training, double eps, const Tensor & save_mean, const Tensor & save_std, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_batch_norm_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8277,6 +14208,19 @@ | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
+Tensor & VariableType::thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_transpose2d_out"); | |
+ Type::thnn_conv_transpose2d_out(output, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
+ if (jit::tracer::isTracing( output, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_out", { output, self, weight, bias }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return output; | |
+} | |
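+// thnn_conv_transpose2d_out above, like the other plain *_out wrappers in this | |
+// diff, only adds profiling and JIT tracing; the actual work is forwarded to | |
+// Type::*_out, which presumably dispatches into the matching *_forward_out | |
+// overload where the autograd bookkeeping lives. | |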
Tensor VariableType::thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose2d"); | |
auto output = Type::thnn_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
@@ -8290,6 +14234,45 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_transpose2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& columns_ = unpack(columns, "columns", 1); | |
+ auto& ones_ = unpack(ones, "ones", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ auto& weight_ = unpack(weight, "weight", 4); | |
+ auto bias_ = unpack_opt(bias, "bias", 6); | |
+ check_inplace(output); | |
+ std::shared_ptr<ThnnConvTranspose2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self, weight, bias })) { | |
+ check_output_args("thnn_conv_transpose2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ThnnConvTranspose2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->output_padding = output_padding; | |
+ grad_fn->dilation = dilation; | |
+ } | |
+ baseType->thnn_conv_transpose2d_forward_out(output_, columns_, ones_, self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, columns, ones, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_forward_out", { output, columns, ones, self, weight, bias }, {output, columns, ones} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->columns_ = SavedVariable(columns, true); | |
+ grad_fn->ones_ = SavedVariable(ones, true); | |
+ } | |
+ return std::forward_as_tuple(output, columns, ones); | |
+} | |
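+// In the *_forward_out wrappers such as the one above, a real backward node | |
+// (here ThnnConvTranspose2DBackward) is built when self, weight, or bias | |
+// requires grad: self and weight are saved before the base kernel runs, while | |
+// the workspace buffers (columns/ones, or finput/fgrad_input elsewhere) are | |
+// saved afterwards because they are produced by the call; rebase_history then | |
+// attaches the node to the out= tensor. | |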
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8324,6 +14307,40 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(columns), std::move(ones)); | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones) const { | |
+ profiler::RecordFunction profiler("thnn_conv_transpose2d_backward_out"); | |
+ auto grad_input_ = unpack_opt(grad_input, "grad_input", 0); | |
+ auto grad_weight_ = unpack_opt(grad_weight, "grad_weight", 1); | |
+ auto grad_bias_ = unpack_opt(grad_bias, "grad_bias", 2); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 3); | |
+ auto& self_ = unpack(self, "self", 4); | |
+ auto& weight_ = unpack(weight, "weight", 5); | |
+ auto& columns_ = unpack(columns, "columns", 11); | |
+ auto& ones_ = unpack(ones, "ones", 12); | |
+ check_inplace(grad_input); | |
+ check_inplace(grad_weight); | |
+ check_inplace(grad_bias); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, weight, columns, ones })) { | |
+ check_output_args("thnn_conv_transpose2d_backward_out", { grad_input, grad_weight, grad_bias }); | |
+ grad_fn = std::make_shared<Error>("the derivative for thnn_conv_transpose2d_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, columns, ones }); | |
+ } | |
+ baseType->thnn_conv_transpose2d_backward_out(grad_input_, grad_weight_, grad_bias_, grad_output_, self_, weight_, kernel_size, stride, padding, output_padding, dilation, columns_, ones_); | |
+ increment_version(grad_input); | |
+ increment_version(grad_weight); | |
+ increment_version(grad_bias); | |
+ rebase_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_weight, grad_bias, grad_output, self, weight, columns, ones )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_backward_out", { grad_input, grad_weight, grad_bias, grad_output, self, weight, columns, ones }, {grad_input, grad_weight, grad_bias} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8359,6 +14376,18 @@ | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
+Tensor & VariableType::thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_transpose3d_out"); | |
+ Type::thnn_conv_transpose3d_out(output, self, weight, bias, stride, padding, output_padding, dilation); | |
+ if (jit::tracer::isTracing( output, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_out", { output, self, weight, bias }, {output} ); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose3d"); | |
auto output = Type::thnn_conv_transpose3d(self, weight, bias, stride, padding, output_padding, dilation); | |
@@ -8371,6 +14400,43 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_transpose3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& finput_ = unpack(finput, "finput", 1); | |
+ auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ auto& weight_ = unpack(weight, "weight", 4); | |
+ auto bias_ = unpack_opt(bias, "bias", 5); | |
+ check_inplace(output); | |
+ std::shared_ptr<ThnnConvTranspose3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self, weight, bias })) { | |
+ check_output_args("thnn_conv_transpose3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ThnnConvTranspose3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->output_padding = output_padding; | |
+ grad_fn->dilation = dilation; | |
+ } | |
+ baseType->thnn_conv_transpose3d_forward_out(output_, finput_, fgrad_input_, self_, weight_, bias_, stride, padding, output_padding, dilation); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, finput, fgrad_input, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_forward_out", { output, finput, fgrad_input, self, weight, bias }, {output, finput, fgrad_input} ); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->finput_ = SavedVariable(finput, true); | |
+ grad_fn->fgrad_input_ = SavedVariable(fgrad_input, true); | |
+ } | |
+ return std::forward_as_tuple(output, finput, fgrad_input); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8403,6 +14469,39 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(finput), std::move(fgrad_input)); | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input) const { | |
+ profiler::RecordFunction profiler("thnn_conv_transpose3d_backward_out"); | |
+ auto grad_input_ = unpack_opt(grad_input, "grad_input", 0); | |
+ auto grad_weight_ = unpack_opt(grad_weight, "grad_weight", 1); | |
+ auto grad_bias_ = unpack_opt(grad_bias, "grad_bias", 2); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 3); | |
+ auto& self_ = unpack(self, "self", 4); | |
+ auto& weight_ = unpack(weight, "weight", 5); | |
+ auto& finput_ = unpack(finput, "finput", 10); | |
+ auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 11); | |
+ check_inplace(grad_input); | |
+ check_inplace(grad_weight); | |
+ check_inplace(grad_bias); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, weight, finput, fgrad_input })) { | |
+ check_output_args("thnn_conv_transpose3d_backward_out", { grad_input, grad_weight, grad_bias }); | |
+ grad_fn = std::make_shared<Error>("the derivative for thnn_conv_transpose3d_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, finput, fgrad_input }); | |
+ } | |
+ baseType->thnn_conv_transpose3d_backward_out(grad_input_, grad_weight_, grad_bias_, grad_output_, self_, weight_, stride, padding, output_padding, dilation, finput_, fgrad_input_); | |
+ increment_version(grad_input); | |
+ increment_version(grad_weight); | |
+ increment_version(grad_bias); | |
+ rebase_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_backward_out", { grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input }, {grad_input, grad_weight, grad_bias} ); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8437,6 +14536,17 @@ | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
+Tensor & VariableType::thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
+ profiler::RecordFunction profiler("thnn_conv2d_out"); | |
+ Type::thnn_conv2d_out(output, self, weight, kernel_size, bias, stride, padding); | |
+ if (jit::tracer::isTracing( output, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_out", { output, self, weight, bias }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::thnn_conv2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv2d"); | |
auto output = Type::thnn_conv2d(self, weight, kernel_size, bias, stride, padding); | |
@@ -8448,6 +14558,41 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
+ profiler::RecordFunction profiler("thnn_conv2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& finput_ = unpack(finput, "finput", 1); | |
+ auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ auto& weight_ = unpack(weight, "weight", 4); | |
+ auto bias_ = unpack_opt(bias, "bias", 6); | |
+ check_inplace(output); | |
+ std::shared_ptr<ThnnConv2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self, weight, bias })) { | |
+ check_output_args("thnn_conv2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ThnnConv2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ } | |
+ baseType->thnn_conv2d_forward_out(output_, finput_, fgrad_input_, self_, weight_, kernel_size, bias_, stride, padding); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, finput, fgrad_input, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_forward_out", { output, finput, fgrad_input, self, weight, bias }, {output, finput, fgrad_input} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->finput_ = SavedVariable(finput, true); | |
+ grad_fn->fgrad_input_ = SavedVariable(fgrad_input, true); | |
+ } | |
+ return std::forward_as_tuple(output, finput, fgrad_input); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8478,6 +14623,38 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(finput), std::move(fgrad_input)); | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) const { | |
+ profiler::RecordFunction profiler("thnn_conv2d_backward_out"); | |
+ auto grad_input_ = unpack_opt(grad_input, "grad_input", 0); | |
+ auto grad_weight_ = unpack_opt(grad_weight, "grad_weight", 1); | |
+ auto grad_bias_ = unpack_opt(grad_bias, "grad_bias", 2); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 3); | |
+ auto& self_ = unpack(self, "self", 4); | |
+ auto& weight_ = unpack(weight, "weight", 5); | |
+ auto& finput_ = unpack(finput, "finput", 9); | |
+ auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 10); | |
+ check_inplace(grad_input); | |
+ check_inplace(grad_weight); | |
+ check_inplace(grad_bias); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, weight, finput, fgrad_input })) { | |
+ check_output_args("thnn_conv2d_backward_out", { grad_input, grad_weight, grad_bias }); | |
+ grad_fn = std::make_shared<Error>("the derivative for thnn_conv2d_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, finput, fgrad_input }); | |
+ } | |
+ baseType->thnn_conv2d_backward_out(grad_input_, grad_weight_, grad_bias_, grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_); | |
+ increment_version(grad_input); | |
+ increment_version(grad_weight); | |
+ increment_version(grad_bias); | |
+ rebase_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_backward_out", { grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input }, {grad_input, grad_weight, grad_bias} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8509,6 +14686,18 @@ | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
+Tensor & VariableType::thnn_conv_depthwise2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_depthwise2d_out"); | |
+ Type::thnn_conv_depthwise2d_out(output, self, weight, kernel_size, bias, stride, padding, dilation); | |
+ if (jit::tracer::isTracing( output, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_out", { output, self, weight, bias }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_depthwise2d"); | |
auto output = Type::thnn_conv_depthwise2d(self, weight, kernel_size, bias, stride, padding, dilation); | |
@@ -8521,6 +14710,37 @@ | |
} | |
return output; | |
} | |
+Tensor & VariableType::thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_depthwise2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& self_ = unpack(self, "self", 1); | |
+ auto& weight_ = unpack(weight, "weight", 2); | |
+ auto bias_ = unpack_opt(bias, "bias", 4); | |
+ check_inplace(output); | |
+ std::shared_ptr<ThnnConvDepthwise2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self, weight, bias })) { | |
+ check_output_args("thnn_conv_depthwise2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ThnnConvDepthwise2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->dilation = dilation; | |
+ } | |
+ baseType->thnn_conv_depthwise2d_forward_out(output_, self_, weight_, kernel_size, bias_, stride, padding, dilation); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_forward_out", { output, self, weight, bias }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return output; | |
+} | |
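+// thnn_conv_depthwise2d_forward_out is the single-output variant of the same | |
+// pattern: it returns only output, uses the single-Tensor rebase_history | |
+// overload, and has no workspace buffers to save after the base call. | |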
Tensor VariableType::thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_depthwise2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8548,6 +14768,34 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &> VariableType::thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_depthwise2d_backward_out"); | |
+ auto grad_input_ = unpack_opt(grad_input, "grad_input", 0); | |
+ auto grad_weight_ = unpack_opt(grad_weight, "grad_weight", 1); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ auto& weight_ = unpack(weight, "weight", 4); | |
+ check_inplace(grad_input); | |
+ check_inplace(grad_weight); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, weight })) { | |
+ check_output_args("thnn_conv_depthwise2d_backward_out", { grad_input, grad_weight }); | |
+ grad_fn = std::make_shared<Error>("the derivative for thnn_conv_depthwise2d_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
+ } | |
+ baseType->thnn_conv_depthwise2d_backward_out(grad_input_, grad_weight_, grad_output_, self_, weight_, kernel_size, stride, padding, dilation); | |
+ increment_version(grad_input); | |
+ increment_version(grad_weight); | |
+ rebase_history({ grad_input, grad_weight }, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_weight, grad_output, self, weight )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_backward_out", { grad_input, grad_weight, grad_output, self, weight }, {grad_input, grad_weight} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return std::forward_as_tuple(grad_input, grad_weight); | |
+} | |
std::tuple<Tensor,Tensor> VariableType::thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, std::array<bool,2> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_depthwise2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8578,6 +14826,17 @@ | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight)); | |
} | |
+Tensor & VariableType::thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
+ profiler::RecordFunction profiler("thnn_conv3d_out"); | |
+ Type::thnn_conv3d_out(output, self, weight, kernel_size, bias, stride, padding); | |
+ if (jit::tracer::isTracing( output, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_out", { output, self, weight, bias }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::thnn_conv3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv3d"); | |
auto output = Type::thnn_conv3d(self, weight, kernel_size, bias, stride, padding); | |
@@ -8589,6 +14848,41 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
+ profiler::RecordFunction profiler("thnn_conv3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& finput_ = unpack(finput, "finput", 1); | |
+ auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ auto& weight_ = unpack(weight, "weight", 4); | |
+ auto bias_ = unpack_opt(bias, "bias", 6); | |
+ check_inplace(output); | |
+ std::shared_ptr<ThnnConv3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self, weight, bias })) { | |
+ check_output_args("thnn_conv3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ThnnConv3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ } | |
+ baseType->thnn_conv3d_forward_out(output_, finput_, fgrad_input_, self_, weight_, kernel_size, bias_, stride, padding); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, finput, fgrad_input, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_forward_out", { output, finput, fgrad_input, self, weight, bias }, {output, finput, fgrad_input} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->finput_ = SavedVariable(finput, true); | |
+ grad_fn->fgrad_input_ = SavedVariable(fgrad_input, true); | |
+ } | |
+ return std::forward_as_tuple(output, finput, fgrad_input); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8619,6 +14913,38 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(finput), std::move(fgrad_input)); | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) const { | |
+ profiler::RecordFunction profiler("thnn_conv3d_backward_out"); | |
+ auto grad_input_ = unpack_opt(grad_input, "grad_input", 0); | |
+ auto grad_weight_ = unpack_opt(grad_weight, "grad_weight", 1); | |
+ auto grad_bias_ = unpack_opt(grad_bias, "grad_bias", 2); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 3); | |
+ auto& self_ = unpack(self, "self", 4); | |
+ auto& weight_ = unpack(weight, "weight", 5); | |
+ auto& finput_ = unpack(finput, "finput", 9); | |
+ auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 10); | |
+ check_inplace(grad_input); | |
+ check_inplace(grad_weight); | |
+ check_inplace(grad_bias); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, weight, finput, fgrad_input })) { | |
+ check_output_args("thnn_conv3d_backward_out", { grad_input, grad_weight, grad_bias }); | |
+ grad_fn = std::make_shared<Error>("the derivative for thnn_conv3d_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, finput, fgrad_input }); | |
+ } | |
+ baseType->thnn_conv3d_backward_out(grad_input_, grad_weight_, grad_bias_, grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_); | |
+ increment_version(grad_input); | |
+ increment_version(grad_weight); | |
+ increment_version(grad_bias); | |
+ rebase_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_backward_out", { grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input }, {grad_input, grad_weight, grad_bias} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ } | |
+ return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8650,6 +14976,18 @@ | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
+Tensor & VariableType::thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_dilated2d_out"); | |
+ Type::thnn_conv_dilated2d_out(output, self, weight, kernel_size, bias, stride, padding, dilation); | |
+ if (jit::tracer::isTracing( output, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_out", { output, self, weight, bias }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated2d"); | |
auto output = Type::thnn_conv_dilated2d(self, weight, kernel_size, bias, stride, padding, dilation); | |
@@ -8662,6 +15000,43 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_dilated2d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& columns_ = unpack(columns, "columns", 1); | |
+ auto& ones_ = unpack(ones, "ones", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ auto& weight_ = unpack(weight, "weight", 4); | |
+ auto bias_ = unpack_opt(bias, "bias", 6); | |
+ check_inplace(output); | |
+ std::shared_ptr<ThnnConvDilated2DBackward> grad_fn; | |
+ if (compute_requires_grad({ self, weight, bias })) { | |
+ check_output_args("thnn_conv_dilated2d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ThnnConvDilated2DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->dilation = dilation; | |
+ } | |
+ baseType->thnn_conv_dilated2d_forward_out(output_, columns_, ones_, self_, weight_, kernel_size, bias_, stride, padding, dilation); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, columns, ones, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_forward_out", { output, columns, ones, self, weight, bias }, {output, columns, ones} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->columns_ = SavedVariable(columns, true); | |
+ grad_fn->ones_ = SavedVariable(ones, true); | |
+ } | |
+ return std::forward_as_tuple(output, columns, ones); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8694,6 +15069,39 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(columns), std::move(ones)); | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) const { | |
+ profiler::RecordFunction profiler("thnn_conv_dilated2d_backward_out"); | |
+ auto grad_input_ = unpack_opt(grad_input, "grad_input", 0); | |
+ auto grad_weight_ = unpack_opt(grad_weight, "grad_weight", 1); | |
+ auto grad_bias_ = unpack_opt(grad_bias, "grad_bias", 2); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 3); | |
+ auto& self_ = unpack(self, "self", 4); | |
+ auto& weight_ = unpack(weight, "weight", 5); | |
+ auto& columns_ = unpack(columns, "columns", 10); | |
+ auto& ones_ = unpack(ones, "ones", 11); | |
+ check_inplace(grad_input); | |
+ check_inplace(grad_weight); | |
+ check_inplace(grad_bias); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, weight, columns, ones })) { | |
+ check_output_args("thnn_conv_dilated2d_backward_out", { grad_input, grad_weight, grad_bias }); | |
+ grad_fn = std::make_shared<Error>("the derivative for thnn_conv_dilated2d_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, columns, ones }); | |
+ } | |
+ baseType->thnn_conv_dilated2d_backward_out(grad_input_, grad_weight_, grad_bias_, grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_); | |
+ increment_version(grad_input); | |
+ increment_version(grad_weight); | |
+ increment_version(grad_bias); | |
+ rebase_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_weight, grad_bias, grad_output, self, weight, columns, ones )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_backward_out", { grad_input, grad_weight, grad_bias, grad_output, self, weight, columns, ones }, {grad_input, grad_weight, grad_bias} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -8727,6 +15135,18 @@ | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
+Tensor & VariableType::thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_dilated3d_out"); | |
+ Type::thnn_conv_dilated3d_out(output, self, weight, kernel_size, bias, stride, padding, dilation); | |
+ if (jit::tracer::isTracing( output, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_out", { output, self, weight, bias }, {output} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return output; | |
+} | |
Tensor VariableType::thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated3d"); | |
auto output = Type::thnn_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation); | |
@@ -8739,6 +15159,43 @@ | |
} | |
return output; | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
+ profiler::RecordFunction profiler("thnn_conv_dilated3d_forward_out"); | |
+ auto& output_ = unpack(output, "output", 0); | |
+ auto& columns_ = unpack(columns, "columns", 1); | |
+ auto& ones_ = unpack(ones, "ones", 2); | |
+ auto& self_ = unpack(self, "self", 3); | |
+ auto& weight_ = unpack(weight, "weight", 4); | |
+ auto bias_ = unpack_opt(bias, "bias", 6); | |
+ check_inplace(output); | |
+ std::shared_ptr<ThnnConvDilated3DBackward> grad_fn; | |
+ if (compute_requires_grad({ self, weight, bias })) { | |
+ check_output_args("thnn_conv_dilated3d_forward_out", { output }); | |
+ grad_fn = std::make_shared<ThnnConvDilated3DBackward>(); | |
+ grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
+ grad_fn->self_ = SavedVariable(self, false); | |
+ grad_fn->weight_ = SavedVariable(weight, false); | |
+ grad_fn->kernel_size = kernel_size; | |
+ grad_fn->stride = stride; | |
+ grad_fn->padding = padding; | |
+ grad_fn->dilation = dilation; | |
+ } | |
+ baseType->thnn_conv_dilated3d_forward_out(output_, columns_, ones_, self_, weight_, kernel_size, bias_, stride, padding, dilation); | |
+ increment_version(output); | |
+ rebase_history(output, grad_fn); | |
+ if (jit::tracer::isTracing( output, columns, ones, self, weight, bias )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_forward_out", { output, columns, ones, self, weight, bias }, {output, columns, ones} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ if (grad_fn) { | |
+ grad_fn->columns_ = SavedVariable(columns, true); | |
+ grad_fn->ones_ = SavedVariable(ones, true); | |
+ } | |
+ return std::forward_as_tuple(output, columns, ones); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -8771,6 +15228,39 @@ | |
} | |
return std::make_tuple(std::move(output), std::move(columns), std::move(ones)); | |
} | |
+std::tuple<Tensor &,Tensor &,Tensor &> VariableType::thnn_conv_dilated3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) const { | |
+ profiler::RecordFunction profiler("thnn_conv_dilated3d_backward_out"); | |
+ auto grad_input_ = unpack_opt(grad_input, "grad_input", 0); | |
+ auto grad_weight_ = unpack_opt(grad_weight, "grad_weight", 1); | |
+ auto grad_bias_ = unpack_opt(grad_bias, "grad_bias", 2); | |
+ auto& grad_output_ = unpack(grad_output, "grad_output", 3); | |
+ auto& self_ = unpack(self, "self", 4); | |
+ auto& weight_ = unpack(weight, "weight", 5); | |
+ auto& columns_ = unpack(columns, "columns", 10); | |
+ auto& ones_ = unpack(ones, "ones", 11); | |
+ check_inplace(grad_input); | |
+ check_inplace(grad_weight); | |
+ check_inplace(grad_bias); | |
+ std::shared_ptr<Error> grad_fn; | |
+ if (compute_requires_grad({ grad_output, self, weight, columns, ones })) { | |
+ check_output_args("thnn_conv_dilated3d_backward_out", { grad_input, grad_weight, grad_bias }); | |
+ grad_fn = std::make_shared<Error>("the derivative for thnn_conv_dilated3d_backward_out is not implemented"); | |
+ grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, columns, ones }); | |
+ } | |
+ baseType->thnn_conv_dilated3d_backward_out(grad_input_, grad_weight_, grad_bias_, grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_); | |
+ increment_version(grad_input); | |
+ increment_version(grad_weight); | |
+ increment_version(grad_bias); | |
+ rebase_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
+ if (jit::tracer::isTracing( grad_input, grad_weight, grad_bias, grad_output, self, weight, columns, ones )) { | |
+ jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_backward_out", { grad_input, grad_weight, grad_bias, grad_output, self, weight, columns, ones }, {grad_input, grad_weight, grad_bias} ); | |
+ setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
+ setattr(n, jit::stringToSymbol("stride"), stride); | |
+ setattr(n, jit::stringToSymbol("padding"), padding); | |
+ setattr(n, jit::stringToSymbol("dilation"), dilation); | |
+ } | |
+ return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
+} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -9393,10 +15883,9 @@ | |
auto& grad_ = unpack(grad, "grad", 0); | |
auto& indices_ = unpack_long(indices, "indices", 1); | |
std::shared_ptr<Error> grad_fn; | |
- if (compute_requires_grad({ grad, indices })) { | |
+ if (compute_requires_grad({ grad })) { | |
grad_fn = std::make_shared<Error>("the derivative for embedding_dense_backward is not implemented"); | |
- grad_fn->next_functions = compute_next_functions({ grad, indices }); | |
- | |
+ grad_fn->next_functions = compute_next_functions({ grad }); | |
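+ // indices is unpacked with unpack_long, i.e. an integer tensor that can | |
+ // never require grad, so it is dropped from the requires-grad check and | |
+ // from next_functions above. | |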
} | |
auto result = as_variable(baseType->embedding_dense_backward(grad_, indices_, num_weights, padding_idx, scale_grad_by_freq)); | |
set_history(result, grad_fn); | |
@@ -9936,10 +16423,9 @@ | |
auto& self_ = unpack(self, "self", 1); | |
auto& other_ = unpack(other, "other", 2); | |
std::shared_ptr<SWhereBackward> grad_fn; | |
- if (compute_requires_grad({ condition, self, other })) { | |
+ if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<SWhereBackward>(); | |
- grad_fn->next_functions = compute_next_functions({ condition, self, other }); | |
- grad_fn->condition_info = condition; | |
+ grad_fn->next_functions = compute_next_functions({ self, other }); | |
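+ // condition is a mask tensor and is not differentiable, so it is excluded | |
+ // from the requires-grad check and next_functions; it is still saved as a | |
+ // SavedVariable below, presumably for use in the backward pass. | |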
grad_fn->condition_ = SavedVariable(condition, false); | |
} | |
auto result = as_variable(baseType->_s_where(condition_, self_, other_)); |