#include "Python.h" | |
#include "VariableType.h" | |
// generated from tools/autograd/templates/VariableType.cpp | |
#include "torch/csrc/autograd/variable.h" | |
#include "torch/csrc/autograd/function.h" | |
#include "torch/csrc/autograd/grad_mode.h" | |
#include "torch/csrc/autograd/saved_variable.h" | |
#include "torch/csrc/autograd/generated/Functions.h" | |
#include "torch/csrc/autograd/functions/tensor.h" | |
#include "torch/csrc/autograd/functions/basic_ops.h" | |
#include "torch/csrc/jit/tracer.h" | |
#include <initializer_list> | |
#include <iostream> | |
#include <functional> | |
#include <cstddef> | |
#ifdef _MSC_VER | |
#ifdef Type | |
#undef Type | |
#endif | |
#endif | |
using namespace at; | |
using namespace torch::autograd::generated; | |
namespace torch { namespace autograd {
// Helper methods for working with Attributes (torch/csrc/jit/attributes.h)
// The overloaded accessors are convenient for the generated code (since we
// don't want to make the codegen do the dispatch manually)
static void setattr(jit::Node* n, jit::Symbol name, int64_t v) { n->i_(name, v); }
static void setattr(jit::Node* n, jit::Symbol name, const at::Scalar& v) { n->t_(name, v.toTensor()); }
static void setattr(jit::Node* n, jit::Symbol name, SparseTensor s) { n->t_(name, s.tref); }
static void setattr(jit::Node* n, jit::Symbol name, const at::IntList& v) { n->is_(name, v); }
static void setattr(jit::Node* n, jit::Symbol name, bool v) { n->i_(name, v); }
static void setattr(jit::Node* n, jit::Symbol name, double v) { n->f_(name, v); }
template<std::size_t N>
static void setattr(jit::Node* n, jit::Symbol name, std::array<bool, N> v) { n->is_(name, std::vector<int64_t>(v.begin(), v.end())); }
VariableType::VariableType(Context* context, Type* baseType)
  : Type(context)
  , baseType(baseType) {
  str = std::string("Variable[") + baseType->toString() + "]";
}
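// VariableType decorates an existing ATen Type ("baseType"): property queries
// below are forwarded straight to the base type, while most tensor-producing
// methods add autograd bookkeeping (Variable wrapping, grad_fn construction,
// version counting and JIT tracing) around the base implementation.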
ScalarType VariableType::scalarType() const {
  return baseType->scalarType();
}
Backend VariableType::backend() const {
  return baseType->backend();
}
bool VariableType::is_cuda() const { return baseType->is_cuda(); }
bool VariableType::is_sparse() const { return baseType->is_sparse(); }
bool VariableType::is_distributed() const { return baseType->is_distributed(); }
std::unique_ptr<Storage> VariableType::storage() const {
  return baseType->storage();
}
std::unique_ptr<Storage> VariableType::storage(size_t size) const {
  return baseType->storage(size);
}
std::unique_ptr<Storage> VariableType::storageFromBlob(void * data, int64_t size, const std::function<void(void*)> & deleter) const {
  return baseType->storageFromBlob(data, size, deleter);
}
std::unique_ptr<Storage> VariableType::unsafeStorageFromTH(void * th_pointer, bool retain) const {
  return baseType->unsafeStorageFromTH(th_pointer, retain);
}
std::unique_ptr<Storage> VariableType::storageWithAllocator(int64_t size, std::unique_ptr<Allocator> allocator) const {
  return baseType->storageWithAllocator(size, std::move(allocator));
}
Tensor VariableType::unsafeTensorFromTH(void * th_pointer, bool retain) const {
  return make_variable(baseType->unsafeTensorFromTH(th_pointer, retain), false);
}
std::unique_ptr<Generator> VariableType::generator() const {
  return baseType->generator();
}
const char * VariableType::toString() const {
  return str.c_str();
}
size_t VariableType::elementSizeInBytes() const {
  return baseType->elementSizeInBytes();
}
Type & VariableType::toBackend(Backend b) const {
  return *VariableImpl::getType(baseType->toBackend(b));
}
Type & VariableType::toScalarType(ScalarType s) const {
  return *VariableImpl::getType(baseType->toScalarType(s));
}
TypeID VariableType::ID() const {
  throw std::runtime_error("VariableType::ID() not implemented");
}
const char * VariableType::typeString() {
  return "VariableType";
}
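// The checked_cast/unpack* helpers below verify that incoming arguments really
// are Variables of the expected type and strip the Variable wrapper, so the
// underlying baseType kernels can be invoked on plain at::Tensors.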
Variable & VariableType::checked_cast(const Type & type, const Tensor & t, const char * name, int pos) { | |
if(!t.defined()) { | |
runtime_error("Expected a Tensor of type %s but found an undefined Tensor for argument #%d '%s'", | |
type.toString(), pos, name); | |
} | |
if (&t.type() != &type && &t.type() != &type.toBackend(toSparse(t.type().backend()))) { | |
runtime_error("Expected object of type %s but found type %s for argument #%d '%s'", | |
type.toString(), t.type().toString(), pos, name); | |
} | |
return static_cast<Variable&>(const_cast<Tensor&>(t)); | |
} | |
Tensor & VariableType::unpack(const Tensor & t, const char * name, int pos) const { | |
return checked_cast(*this, t, name, pos).data(); | |
} | |
SparseTensor VariableType::unpack(SparseTensor t, const char * name, int pos) const { | |
auto backend = is_cuda() ? kSparseCUDA : kSparseCPU; | |
return SparseTensor(checked_cast(this->toBackend(backend), t.tref, name, pos).data()); | |
} | |
Tensor & VariableType::unpack_long(const Tensor & t, const char * name, int pos) const { | |
auto& type = *VariableImpl::getType(baseType->toScalarType(kLong)); | |
return checked_cast(type, t, name, pos).data(); | |
} | |
Tensor & VariableType::unpack_byte(const Tensor & t, const char * name, int pos) const { | |
auto& type = *VariableImpl::getType(baseType->toScalarType(kByte)); | |
return checked_cast(type, t, name, pos).data(); | |
} | |
Tensor & VariableType::unpack_any(const Tensor & t, const char * name, int pos) const { | |
if (!t.defined()) { | |
runtime_error("Expected a Tensor of type Variable but found an undefined Tensor for argument #%d '%s'", | |
pos, name); | |
} | |
auto scalarType = t.type().scalarType(); | |
auto backend = t.type().backend(); | |
auto& type = *VariableImpl::getType(baseType->toScalarType(scalarType).toBackend(backend)); | |
return checked_cast(type, t, name, pos).data(); | |
} | |
Tensor VariableType::unpack_opt(const Tensor & t, const char * name, int pos) const { | |
if(!t.defined()) { | |
return Tensor(); | |
} | |
return unpack(t, name, pos); | |
} | |
std::vector<at::Tensor> VariableType::unpack(at::TensorList tl, const char *name, int pos) const { | |
std::vector<at::Tensor> ret(tl.size()); | |
for (size_t i = 0; i < tl.size(); ++i) { | |
const auto &t = tl[i]; | |
if (!t.defined()) { | |
runtime_error("Expected a Tensor of type %s but found an undefined Tensor at position #%d " | |
"for iterable argument #%d '%s'", | |
toString(), i, pos, name); | |
} | |
if (&t.type() == this) { | |
ret[i] = static_cast<const Variable&>(t).data(); | |
} else { | |
runtime_error("Expected object of type %s but found type %s at position #%d " | |
"for iterable argument #%d '%s'", | |
toString(),t.type().toString(), i, pos, name); | |
} | |
} | |
return ret; | |
} | |
std::vector<at::Tensor> VariableType::unpack_idxs(at::TensorList tl, const char *name, int pos) const { | |
auto& longType = *VariableImpl::getType(baseType->toScalarType(kLong)); | |
auto& byteType = *VariableImpl::getType(baseType->toScalarType(kByte)); | |
std::vector<at::Tensor> ret(tl.size()); | |
for (size_t i = 0; i < tl.size(); ++i) { | |
const auto &t = tl[i]; | |
if (!t.defined()) { | |
continue; | |
} else if (!(t.type() == longType || t.type() == byteType)) { | |
runtime_error("Expected object of type %s or %s but found type %s at position #%d " | |
"for iterable argument #%d '%s'", | |
longType.toString(), byteType.toString(), t.type().toString(), | |
i, pos, name); | |
} else { | |
ret[i] = static_cast<const Variable&>(t).data(); | |
} | |
} | |
return ret; | |
} | |
static Tensor as_variable(Tensor tensor) {
  return make_variable(std::move(tensor));
}
static std::tuple<Tensor, Tensor>
as_variable(std::tuple<Tensor, Tensor> tensors) {
  return std::make_tuple<>(
      make_variable(std::move(std::get<0>(tensors))),
      make_variable(std::move(std::get<1>(tensors))));
}
static std::tuple<Tensor, Tensor, Tensor>
as_variable(std::tuple<Tensor, Tensor, Tensor> tensors) {
  return std::make_tuple<>(
      make_variable(std::move(std::get<0>(tensors))),
      make_variable(std::move(std::get<1>(tensors))),
      make_variable(std::move(std::get<2>(tensors))));
}
static std::tuple<Tensor, Tensor, Tensor, Tensor>
as_variable(std::tuple<Tensor, Tensor, Tensor, Tensor> tensors) {
  return std::make_tuple<>(
      make_variable(std::move(std::get<0>(tensors))),
      make_variable(std::move(std::get<1>(tensors))),
      make_variable(std::move(std::get<2>(tensors))),
      make_variable(std::move(std::get<3>(tensors))));
}
static std::vector<Tensor> as_variable(TensorList tl) {
  std::vector<Tensor> variables;
  for (auto& t : tl) {
    variables.emplace_back(make_variable(std::move(t)));
  }
  return variables;
}
static Tensor as_view(const Tensor & base, Tensor tensor) {
  auto base_var = Variable(base);
  if (base_var.is_view()) {
    base_var = base_var.base();
  }
  return make_variable_view(std::move(base_var), std::move(tensor));
}
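// ensure_no_aten_scalars (below) expands a zero-dimensional result into a
// 1-element tensor; presumably Variables are not expected to carry 0-dim ATen
// scalars at this point, so the in-place ops that can produce them (transpose_,
// t_) normalize the shape after calling into the base type.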
static void ensure_no_aten_scalars(Tensor & data) {
  if (data.defined() && data.dim() == 0) {
    data.as_strided_({1}, {1});
  }
}
template<typename T>
static bool computes_grad_tmpl(T tensors) {
  if (!GradMode::is_enabled()) {
    return false;
  }
  for (const Tensor& tensor : tensors) {
    auto& var = static_cast<const Variable&>(tensor);
    if (var.defined() && var.requires_grad()) {
      return true;
    }
  }
  return false;
}
using TensorRef = std::reference_wrapper<const Tensor>;
using TensorRefList = std::initializer_list<TensorRef>;
static bool compute_requires_grad(const TensorRefList& tensors) {
  return computes_grad_tmpl(tensors);
}
static bool compute_requires_grad(TensorList tensors) {
  return computes_grad_tmpl(tensors);
}
static void check_no_requires_grad(const Tensor& tensor, const char* name) {
  auto& var = static_cast<const Variable&>(tensor);
  if (var.defined() && var.requires_grad()) {
    std::string msg = "the derivative for '";
    msg += name;
    msg += "' is not implemented";
    throw std::runtime_error(msg);
  }
}
static function_list compute_next_functions(const std::initializer_list<Tensor>& tensors) {
  return Function::flags(tensors).next_functions;
}
static function_list compute_next_functions(TensorList tensors) {
  return Function::flags(tensors).next_functions;
}
static void check_inplace(const Tensor& tensor) {
  auto& var = static_cast<const Variable&>(tensor);
  if (var.requires_grad() && var.is_leaf() && GradMode::is_enabled()) {
    at::runtime_error(
        "a leaf Variable that requires grad has been used in an in-place operation.");
  }
}
static void rebase_history(Tensor& tensor, std::shared_ptr<Function> grad_fn, int output_nr=0) {
  if (!tensor.defined()) {
    return;
  }
  auto& var = static_cast<Variable&>(tensor);
  if (grad_fn) {
    grad_fn->num_inputs = 1;
    var.rebase_history(output_nr, std::move(grad_fn));
  }
}
// var must be the only differentiable output of the function. Use the ArrayRef
// overload for functions with multiple differentiable outputs.
static void set_history(Tensor& t, std::shared_ptr<Function> grad_fn, int output_nr=0) {
  auto& var = static_cast<Variable&>(t);
  if (grad_fn) {
    grad_fn->num_inputs = 1;
    var.get()->output_nr = output_nr;
    var.get()->_grad_fn = std::move(grad_fn);
  }
}
static void set_history(at::ArrayRef<Tensor> tl, std::shared_ptr<Function> grad_fn) {
  if (grad_fn) {
    grad_fn->num_inputs = tl.size();
    int64_t output_nr = 0;
    for (auto& t : tl) {
      if (!t.defined()) continue;
      // TODO: combine this with the Variable construction
      auto& var = static_cast<const Variable&>(t);
      var.get()->output_nr = output_nr;
      var.get()->_grad_fn = grad_fn;
      output_nr++;
    }
  }
}
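// Note the split above: rebase_history() is used after in-place operations,
// where an existing Variable's history has to be re-pointed at the new grad_fn,
// while set_history() attaches a grad_fn (and output_nr) to freshly created
// outputs returned via as_variable()/as_view().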
static at::ArrayRef<Variable> flatten(TensorList tensors) { | |
auto data = static_cast<const Variable*>(tensors.data()); | |
return at::ArrayRef<Variable>(data, tensors.size()); | |
} | |
static variable_list flatten(const Tensor& x, const TensorList& y) { | |
std::vector<Variable> r; | |
r.reserve(1 + y.size()); | |
r.emplace_back(x); | |
r.insert(r.end(), y.begin(), y.end()); | |
return r; | |
} | |
static variable_list flatten(const Tensor& x, TensorList y, const Tensor& z) { | |
std::vector<Variable> r; | |
r.reserve(2 + y.size()); | |
r.emplace_back(x); | |
r.insert(r.end(), y.begin(), y.end()); | |
r.emplace_back(z); | |
return r; | |
} | |
static void increment_version(const Tensor & t) { | |
auto& var = static_cast<const Variable&>(t); | |
var.version_counter().increment(); | |
} | |
static bool isFloatingPoint(ScalarType s) { | |
return s == kFloat || s == kDouble || s == kHalf; | |
} | |
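// Every generated operator below follows roughly the same shape. A schematic
// sketch (illustrative only; "op"/"OpBackward"/"arg" are hypothetical names,
// and the in-place-only steps are marked):
//
//   Tensor & VariableType::op_(Tensor & self, ...) const {
//     profiler::RecordFunction profiler("op_");
//     auto& self_ = unpack(self, "self", 0);          // strip the Variable wrapper
//     check_inplace(self);                            // in-place ops only
//     std::shared_ptr<OpBackward> grad_fn;
//     if (compute_requires_grad({ self })) {          // skipped when grad mode is off
//       grad_fn = std::make_shared<OpBackward>();
//       grad_fn->next_functions = compute_next_functions({ self });
//       // ... save whatever the backward formula needs ...
//     }
//     baseType->op_(self_, ...);                      // run the real kernel
//     increment_version(self);                        // in-place ops only
//     rebase_history(self, grad_fn);                  // set_history() for fresh outputs
//     if (jit::tracer::isTracing( self )) {
//       jit::Node *n = jit::tracer::recordTrace( "op", { self }, { self } );
//       setattr(n, jit::stringToSymbol("arg"), arg);
//     }
//     return self;
//   }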
Tensor & VariableType::s_copy_(Tensor & self, const Tensor & src, bool async) const { | |
// TODO: once copy is exposed in Declarations.yaml we may be able to bind | |
// it automatically | |
auto& self_ = unpack(self, "self", 0); | |
auto& src_ = unpack_any(src, "src", 1); | |
check_inplace(self); | |
std::shared_ptr<CopyBackwards> grad_fn; | |
auto requires_grad = compute_requires_grad({ self, src }); | |
requires_grad &= isFloatingPoint(self.type().scalarType()); | |
if (requires_grad) { | |
grad_fn = std::make_shared<CopyBackwards>(); | |
grad_fn->next_functions = compute_next_functions({ self, src }); | |
grad_fn->num_inputs = 1; | |
grad_fn->src_type = &src.type(); | |
grad_fn->src_device = src.is_cuda() ? src.get_device() : -1; | |
} | |
baseType->s_copy_(self_, src_, async); | |
increment_version(self); | |
rebase_history(static_cast<Variable&>(self), std::move(grad_fn)); | |
return self; | |
} | |
Tensor & VariableType::resize_(Tensor & self, IntList size) const { | |
auto& self_ = unpack(self, "self", 0); | |
if (static_cast<Variable&>(self).requires_grad()) { | |
at::runtime_error("cannot resize variables that require grad"); | |
} | |
baseType->resize_(self_, size); | |
return self; | |
} | |
Tensor & VariableType::resize_as_(Tensor & self, const Tensor & the_template) const { | |
return resize_(self, the_template.sizes()); | |
} | |
Tensor VariableType::contiguous(const Tensor & self) const { | |
unpack(self, "self", 0); | |
if (self.is_contiguous()) { | |
return self; | |
} | |
return self.clone(); | |
} | |
static std::vector<int64_t> to_arg_sizes(TensorList tensors, int64_t dim) { | |
std::vector<int64_t> arg_sizes(tensors.size()); | |
for (size_t i = 0; i < tensors.size(); ++i) { | |
arg_sizes[i] = tensors[i].size(dim); | |
} | |
return arg_sizes; | |
} | |
int64_t VariableType::storage_offset(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto result = baseType->storage_offset(self_); | |
return result; | |
} | |
Tensor VariableType::zeros(IntList size) const { | |
profiler::RecordFunction profiler("zeros"); | |
auto result = as_variable(baseType->zeros(size)); | |
return result; | |
} | |
Tensor VariableType::zeros_like(const Tensor & input) const { | |
profiler::RecordFunction profiler("zeros_like"); | |
auto& input_ = unpack(input, "input", 0); | |
auto result = as_variable(baseType->zeros_like(input_)); | |
if (jit::tracer::isTracing( input )) { | |
jit::Node *n = jit::tracer::recordTrace( "zeros_like", { input }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::ones(IntList size) const { | |
profiler::RecordFunction profiler("ones"); | |
auto result = as_variable(baseType->ones(size)); | |
return result; | |
} | |
Tensor VariableType::ones_like(const Tensor & input) const { | |
profiler::RecordFunction profiler("ones_like"); | |
auto& input_ = unpack(input, "input", 0); | |
auto result = as_variable(baseType->ones_like(input_)); | |
if (jit::tracer::isTracing( input )) { | |
jit::Node *n = jit::tracer::recordTrace( "ones_like", { input }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
int64_t VariableType::numel(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto result = baseType->numel(self_); | |
return result; | |
} | |
Tensor & VariableType::set_(Tensor & self, Storage & storage) const { | |
profiler::RecordFunction profiler("set_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for set_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->set_(self_, storage); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor & VariableType::set_(Tensor & self, Storage & sourceStorage, int64_t storage_offset, IntList size, IntList stride) const { | |
profiler::RecordFunction profiler("set_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for set_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->set_(self_, sourceStorage, storage_offset, size, stride); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor & VariableType::set_(Tensor & self, const Tensor & source) const { | |
profiler::RecordFunction profiler("set_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& source_ = unpack(source, "source", 1); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self, source })) { | |
grad_fn = std::make_shared<Error>("the derivative for set_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, source }); | |
} | |
baseType->set_(self_, source_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, source )) { | |
jit::Node *n = jit::tracer::recordTrace( "set", { self, source }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor & VariableType::set_(Tensor & self) const { | |
profiler::RecordFunction profiler("set_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for set_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->set_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "set", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor & VariableType::fill_(Tensor & self, Scalar value) const { | |
profiler::RecordFunction profiler("fill_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<FillBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<FillBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->fill_(self_, value); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "fill", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor & VariableType::fill_(Tensor & self, const Tensor & value) const { | |
profiler::RecordFunction profiler("fill_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& value_ = unpack(value, "value", 1); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self, value })) { | |
grad_fn = std::make_shared<Error>("the derivative for fill_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, value }); | |
} | |
baseType->fill_(self_, value_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, value )) { | |
jit::Node *n = jit::tracer::recordTrace( "fill", { self, value }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
bool VariableType::is_contiguous(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto result = baseType->is_contiguous(self_); | |
return result; | |
} | |
bool VariableType::is_set_to(const Tensor & self, const Tensor & tensor) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor_ = unpack(tensor, "tensor", 1); | |
auto result = baseType->is_set_to(self_, tensor_); | |
return result; | |
} | |
Tensor & VariableType::s_masked_fill_(Tensor & self, const Tensor & mask, Scalar value) const { | |
profiler::RecordFunction profiler("masked_fill_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mask_ = unpack_byte(mask, "mask", 1); | |
check_inplace(self); | |
std::shared_ptr<MaskedFillBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MaskedFillBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->mask_ = SavedVariable(mask, false); | |
} | |
baseType->s_masked_fill_(self_, mask_, value); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, mask )) { | |
jit::Node *n = jit::tracer::recordTrace( "masked_fill", { self, mask }, { self } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_masked_fill_(Tensor & self, const Tensor & mask, const Tensor & value) const { | |
profiler::RecordFunction profiler("masked_fill_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mask_ = unpack_byte(mask, "mask", 1); | |
auto& value_ = unpack(value, "value", 2); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self, mask, value })) { | |
grad_fn = std::make_shared<Error>("the derivative for masked_fill_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, mask, value }); | |
} | |
baseType->s_masked_fill_(self_, mask_, value_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, mask, value )) { | |
jit::Node *n = jit::tracer::recordTrace( "masked_fill", { self, mask, value }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor & VariableType::s_masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) const { | |
profiler::RecordFunction profiler("masked_scatter_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mask_ = unpack_byte(mask, "mask", 1); | |
auto& source_ = unpack(source, "source", 2); | |
check_inplace(self); | |
std::shared_ptr<MaskedScatterBackward> grad_fn; | |
if (compute_requires_grad({ self, source })) { | |
grad_fn = std::make_shared<MaskedScatterBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, source }); | |
grad_fn->mask_ = SavedVariable(mask, false); | |
grad_fn->source_sizes = source.sizes(); | |
} | |
baseType->s_masked_scatter_(self_, mask_, source_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, mask, source )) { | |
jit::Node *n = jit::tracer::recordTrace( "masked_scatter", { self, mask, source }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::s_masked_select(const Tensor & self, const Tensor & mask) const { | |
profiler::RecordFunction profiler("masked_select"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mask_ = unpack_byte(mask, "mask", 1); | |
std::shared_ptr<MaskedSelectBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MaskedSelectBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
grad_fn->mask_ = SavedVariable(mask, false); | |
} | |
auto result = as_variable(baseType->s_masked_select(self_, mask_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, mask )) { | |
jit::Node *n = jit::tracer::recordTrace( "masked_select", { self, mask }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::transpose(const Tensor & self, int64_t dim0, int64_t dim1) const { | |
profiler::RecordFunction profiler("transpose"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TransposeBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TransposeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim0 = dim0; | |
grad_fn->dim1 = dim1; | |
} | |
auto result = as_view(self, baseType->transpose(self_, dim0, dim1)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "transpose", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim0"), dim0); | |
setattr(n, jit::stringToSymbol("dim1"), dim1); | |
} | |
return result; | |
} | |
Tensor & VariableType::transpose_(Tensor & self, int64_t dim0, int64_t dim1) const { | |
profiler::RecordFunction profiler("transpose_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TransposeBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TransposeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim0 = dim0; | |
grad_fn->dim1 = dim1; | |
} | |
baseType->transpose_(self_, dim0, dim1); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "transpose", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("dim0"), dim0); | |
setattr(n, jit::stringToSymbol("dim1"), dim1); | |
} | |
return self; | |
} | |
Tensor VariableType::t(const Tensor & self) const { | |
profiler::RecordFunction profiler("t"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_view(self, baseType->t(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "t", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::t_(Tensor & self) const { | |
profiler::RecordFunction profiler("t_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->t_(self_); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "t", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::nonzero(const Tensor & self) const { | |
profiler::RecordFunction profiler("nonzero"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->nonzero(self_)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "nonzero", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::clone(const Tensor & self) const { | |
profiler::RecordFunction profiler("clone"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CloneBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CloneBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->clone(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clone", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::view(const Tensor & self, IntList size) const { | |
profiler::RecordFunction profiler("view"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ViewBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ViewBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto result = as_view(self, baseType->view(self_, size)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "view", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("size"), size); | |
} | |
return result; | |
} | |
Tensor VariableType::index_select(const Tensor & self, int64_t dim, const Tensor & index) const { | |
profiler::RecordFunction profiler("index_select"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
std::shared_ptr<IndexSelectBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<IndexSelectBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
auto result = as_variable(baseType->index_select(self_, dim, index_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_select", { self, index }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
Tensor & VariableType::index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) const { | |
profiler::RecordFunction profiler("index_copy_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
auto& source_ = unpack(source, "source", 3); | |
check_inplace(self); | |
std::shared_ptr<IndexCopyBackward> grad_fn; | |
if (compute_requires_grad({ self, source })) { | |
grad_fn = std::make_shared<IndexCopyBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, source }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->index_copy_(self_, dim, index_, source_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, index, source )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_copy", { self, index, source }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor VariableType::take(const Tensor & self, const Tensor & index) const { | |
profiler::RecordFunction profiler("take"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 1); | |
std::shared_ptr<TakeBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TakeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
auto result = as_variable(baseType->take(self_, index_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "take", { self, index }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate) const { | |
profiler::RecordFunction profiler("put_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 1); | |
auto& source_ = unpack(source, "source", 2); | |
check_inplace(self); | |
std::shared_ptr<PutBackward> grad_fn; | |
if (compute_requires_grad({ self, source })) { | |
grad_fn = std::make_shared<PutBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, source }); | |
grad_fn->index_ = SavedVariable(index, false); | |
grad_fn->source_info = source; | |
grad_fn->accumulate = accumulate; | |
} | |
baseType->put_(self_, index_, source_, accumulate); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, index, source )) { | |
jit::Node *n = jit::tracer::recordTrace( "put", { self, index, source }, { self } ); | |
setattr(n, jit::stringToSymbol("accumulate"), accumulate); | |
} | |
return self; | |
} | |
Tensor & VariableType::index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) const { | |
profiler::RecordFunction profiler("index_add_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
auto& source_ = unpack(source, "source", 3); | |
check_inplace(self); | |
std::shared_ptr<IndexAddBackward> grad_fn; | |
if (compute_requires_grad({ self, source })) { | |
grad_fn = std::make_shared<IndexAddBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, source }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->index_add_(self_, dim, index_, source_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, index, source )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_add", { self, index, source }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor & VariableType::index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) const { | |
profiler::RecordFunction profiler("index_fill_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
check_inplace(self); | |
std::shared_ptr<IndexFillBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<IndexFillBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->index_fill_(self_, dim, index_, value); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_fill", { self, index }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor & VariableType::index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value) const { | |
profiler::RecordFunction profiler("index_fill_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
auto& value_ = unpack(value, "value", 3); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self, index, value })) { | |
grad_fn = std::make_shared<Error>("the derivative for index_fill_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, index, value }); | |
} | |
baseType->index_fill_(self_, dim, index_, value_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, index, value )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_fill", { self, index, value }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor VariableType::unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step) const { | |
profiler::RecordFunction profiler("unfold"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UnfoldBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UnfoldBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dimension = dimension; | |
grad_fn->size = size; | |
grad_fn->step = step; | |
} | |
auto result = as_view(self, baseType->unfold(self_, dimension, size, step)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "unfold", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dimension"), dimension); | |
setattr(n, jit::stringToSymbol("size"), size); | |
setattr(n, jit::stringToSymbol("step"), step); | |
} | |
return result; | |
} | |
Tensor VariableType::range(Scalar start, Scalar end, Scalar step) const { | |
profiler::RecordFunction profiler("range"); | |
auto result = as_variable(baseType->range(start, end, step)); | |
return result; | |
} | |
Tensor VariableType::arange(Scalar start, Scalar end, Scalar step) const { | |
profiler::RecordFunction profiler("arange"); | |
auto result = as_variable(baseType->arange(start, end, step)); | |
return result; | |
} | |
Tensor VariableType::arange(Scalar end) const { | |
profiler::RecordFunction profiler("arange"); | |
auto result = as_variable(baseType->arange(end)); | |
return result; | |
} | |
Tensor & VariableType::scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) const { | |
profiler::RecordFunction profiler("scatter_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
auto& src_ = unpack(src, "src", 3); | |
check_inplace(self); | |
std::shared_ptr<ScatterBackward0> grad_fn; | |
if (compute_requires_grad({ self, src })) { | |
grad_fn = std::make_shared<ScatterBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self, src }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->scatter_(self_, dim, index_, src_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, index, src )) { | |
jit::Node *n = jit::tracer::recordTrace( "scatter", { self, index, src }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor & VariableType::scatter_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) const { | |
profiler::RecordFunction profiler("scatter_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
check_inplace(self); | |
std::shared_ptr<ScatterBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ScatterBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->scatter_(self_, dim, index_, value); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "scatter", { self, index }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor & VariableType::scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) const { | |
profiler::RecordFunction profiler("scatter_add_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
auto& src_ = unpack(src, "src", 3); | |
check_inplace(self); | |
std::shared_ptr<ScatterAddBackward> grad_fn; | |
if (compute_requires_grad({ self, src })) { | |
grad_fn = std::make_shared<ScatterAddBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, src }); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
baseType->scatter_add_(self_, dim, index_, src_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, index, src )) { | |
jit::Node *n = jit::tracer::recordTrace( "scatter_add", { self, index, src }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor VariableType::gather(const Tensor & self, int64_t dim, const Tensor & index) const { | |
profiler::RecordFunction profiler("gather"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& index_ = unpack_long(index, "index", 2); | |
std::shared_ptr<GatherBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<GatherBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->index_ = SavedVariable(index, false); | |
} | |
auto result = as_variable(baseType->gather(self_, dim, index_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, index )) { | |
jit::Node *n = jit::tracer::recordTrace( "gather", { self, index }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
void* VariableType::data_ptr(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto result = baseType->data_ptr(self_); | |
return result; | |
} | |
bool VariableType::equal(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("equal"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = baseType->equal(self_, other_); | |
return result; | |
} | |
Tensor VariableType::__and__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__and__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->__and__(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__and_", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s___and__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__and__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s___and__(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__and_", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::__iand__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__iand__"); | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__iand__(self_, other); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__iand_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s___iand__(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__iand__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___iand__(self_, other_); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__iand_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::__or__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__or__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->__or__(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__or_", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s___or__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__or__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s___or__(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__or_", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::__ior__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__ior__"); | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__ior__(self_, other); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ior_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s___ior__(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__ior__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___ior__(self_, other_); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ior_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::__xor__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__xor__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->__xor__(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__xor_", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s___xor__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__xor__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s___xor__(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__xor_", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::__ixor__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__ixor__"); | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__ixor__(self_, other); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ixor_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s___ixor__(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__ixor__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___ixor__(self_, other_); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ixor_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::__lshift__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__lshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->__lshift__(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__lshift_", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s___lshift__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__lshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s___lshift__(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__lshift_", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::__ilshift__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__ilshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__ilshift__(self_, other); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ilshift_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s___ilshift__(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__ilshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___ilshift__(self_, other_); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__ilshift_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::__rshift__(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__rshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->__rshift__(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__rshift_", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s___rshift__(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__rshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s___rshift__(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__rshift_", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::__irshift__(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("__irshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
baseType->__irshift__(self_, other); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "__irshift_", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s___irshift__(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("__irshift__"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
baseType->s___irshift__(self_, other_); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "__irshift_", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::lt(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("lt"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->lt(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "lt", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_lt(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("lt"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s_lt(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "lt", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::lt_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("lt_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LtBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LtBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
baseType->lt_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "lt", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_lt_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("lt_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<LtBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<LtBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_lt_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "lt", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::gt(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("gt"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->gt(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "gt", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_gt(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("gt"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s_gt(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "gt", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::gt_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("gt_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<GtBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<GtBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
baseType->gt_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "gt", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_gt_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("gt_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<GtBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<GtBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_gt_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "gt", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::le(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("le"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->le(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "le", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_le(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("le"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s_le(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "le", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::le_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("le_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LeBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
baseType->le_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "le", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_le_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("le_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<LeBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<LeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_le_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "le", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::ge(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("ge"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->ge(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ge", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_ge(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("ge"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s_ge(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "ge", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::ge_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("ge_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<GeBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<GeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
baseType->ge_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ge", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_ge_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("ge_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<GeBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<GeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_ge_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "ge", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::eq(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("eq"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->eq(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "eq", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_eq(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("eq"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s_eq(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "eq", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::eq_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("eq_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<EqBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<EqBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_info = self; | |
} | |
baseType->eq_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "eq", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_eq_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("eq_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<EqBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<EqBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_eq_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "eq", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::ne(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("ne"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->ne(self_, other)); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ne", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_ne(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("ne"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
auto result = as_variable(baseType->s_ne(self_, other_)); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "ne", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::ne_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("ne_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for ne_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->ne_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ne", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
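// Fallback above: when no derivative formula is available (ne_ with a Scalar | |
// here, polygamma_ further down), the generated code installs an Error node | |
// carrying "the derivative for ... is not implemented", which presumably | |
// raises that message if backward ever reaches it. | |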
Tensor & VariableType::s_ne_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("ne_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<NeBackward> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<NeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_info = other; | |
grad_fn->self_info = self; | |
} | |
baseType->s_ne_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "ne", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
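// Dimension-wise reductions (min/max/kthvalue/mode/median/sort/topk) return a | |
// (values, indices) pair. The backward node records self.sizes(), dim and, | |
// where applicable, keepdim up front, and after the forward call also saves | |
// the indices tensor as an output SavedVariable, presumably so the gradient | |
// can be routed back to the selected elements. | |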
std::tuple<Tensor,Tensor> VariableType::min(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("min"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MinBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MinBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
Tensor min, min_indices; | |
std::tie(min, min_indices) = as_variable(baseType->min(self_, dim, keepdim)); | |
set_history(min, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "min", { self }, { min, min_indices } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
grad_fn->min_indices_ = SavedVariable(min_indices, true); | |
} | |
return std::make_tuple(std::move(min), std::move(min_indices)); | |
} | |
Tensor VariableType::s_min(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("min"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<MinBackward2> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<MinBackward2>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto result = as_variable(baseType->s_min(self_, other_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "min", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::min(const Tensor & self) const { | |
profiler::RecordFunction profiler("min"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MinBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MinBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->min(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "min", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor> VariableType::max(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("max"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MaxBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MaxBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
Tensor max, max_indices; | |
std::tie(max, max_indices) = as_variable(baseType->max(self_, dim, keepdim)); | |
set_history(max, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max", { self }, { max, max_indices } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
grad_fn->max_indices_ = SavedVariable(max_indices, true); | |
} | |
return std::make_tuple(std::move(max), std::move(max_indices)); | |
} | |
Tensor VariableType::s_max(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("max"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<MaxBackward2> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<MaxBackward2>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto result = as_variable(baseType->s_max(self_, other_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "max", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::max(const Tensor & self) const { | |
profiler::RecordFunction profiler("max"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MaxBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MaxBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->max(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor> VariableType::kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("kthvalue"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<KthvalueBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<KthvalueBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
Tensor values, indices; | |
std::tie(values, indices) = as_variable(baseType->kthvalue(self_, k, dim, keepdim)); | |
set_history(values, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "kthvalue", { self }, { values, indices } ); | |
setattr(n, jit::stringToSymbol("k"), k); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(values), std::move(indices)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::mode(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("mode"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ModeBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ModeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
Tensor values, indices; | |
std::tie(values, indices) = as_variable(baseType->mode(self_, dim, keepdim)); | |
set_history(values, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "mode", { self }, { values, indices } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(values), std::move(indices)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::median(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("median"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MedianBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MedianBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
Tensor values, indices; | |
std::tie(values, indices) = as_variable(baseType->median(self_, dim, keepdim)); | |
set_history(values, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "median", { self }, { values, indices } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(values), std::move(indices)); | |
} | |
Tensor VariableType::median(const Tensor & self) const { | |
profiler::RecordFunction profiler("median"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MedianBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MedianBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->median(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "median", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor> VariableType::sort(const Tensor & self, int64_t dim, bool descending) const { | |
profiler::RecordFunction profiler("sort"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SortBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SortBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
} | |
Tensor values, indices; | |
std::tie(values, indices) = as_variable(baseType->sort(self_, dim, descending)); | |
set_history(values, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sort", { self }, { values, indices } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("descending"), descending); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(values), std::move(indices)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) const { | |
profiler::RecordFunction profiler("topk"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TopkBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TopkBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
} | |
Tensor values, indices; | |
std::tie(values, indices) = as_variable(baseType->topk(self_, k, dim, largest, sorted)); | |
set_history(values, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "topk", { self }, { values, indices } ); | |
setattr(n, jit::stringToSymbol("k"), k); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("largest"), largest); | |
setattr(n, jit::stringToSymbol("sorted"), sorted); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(values), std::move(indices)); | |
} | |
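// all/any/get_device return plain C++ values rather than Tensors, so no | |
// autograd metadata is created and no trace node is recorded; they simply | |
// unpack self and forward to baseType (get_device also omits the profiler | |
// RecordFunction the other wrappers emit). | |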
bool VariableType::all(const Tensor & self) const { | |
profiler::RecordFunction profiler("all"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = baseType->all(self_); | |
return result; | |
} | |
bool VariableType::any(const Tensor & self) const { | |
profiler::RecordFunction profiler("any"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = baseType->any(self_); | |
return result; | |
} | |
int64_t VariableType::get_device(const Tensor & self) const { | |
auto& self_ = unpack(self, "self", 0); | |
auto result = baseType->get_device(self_); | |
return result; | |
} | |
Tensor VariableType::abs(const Tensor & self) const { | |
profiler::RecordFunction profiler("abs"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AbsBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AbsBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->abs(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "abs", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::abs_(Tensor & self) const { | |
profiler::RecordFunction profiler("abs_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AbsBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AbsBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->abs_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "abs", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
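// Pointwise unary ops that back-propagate through the input (abs, log, cos, | |
// ...) save `self` directly in the out-of-place wrapper and `self.clone()` in | |
// the in-place wrapper, since the buffer is about to be overwritten. The ops | |
// below such as sigmoid, exp, expm1, tan, tanh, sqrt and rsqrt instead save | |
// the result after the forward call, presumably because their backward can be | |
// expressed in terms of the output. | |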
Tensor & VariableType::sigmoid_(Tensor & self) const { | |
profiler::RecordFunction profiler("sigmoid_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SigmoidBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SigmoidBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->sigmoid_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sigmoid", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::sigmoid(const Tensor & self) const { | |
profiler::RecordFunction profiler("sigmoid"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SigmoidBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SigmoidBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->sigmoid(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sigmoid", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor & VariableType::log_(Tensor & self) const { | |
profiler::RecordFunction profiler("log_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LogBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LogBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->log_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::log(const Tensor & self) const { | |
profiler::RecordFunction profiler("log"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LogBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LogBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->log(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::log1p_(Tensor & self) const { | |
profiler::RecordFunction profiler("log1p_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Log1PBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Log1PBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->log1p_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log1p", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::log1p(const Tensor & self) const { | |
profiler::RecordFunction profiler("log1p"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<Log1PBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Log1PBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->log1p(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log1p", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::lgamma(const Tensor & self) const { | |
profiler::RecordFunction profiler("lgamma"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LgammaBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LgammaBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->lgamma(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "lgamma", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::lgamma_(Tensor & self) const { | |
profiler::RecordFunction profiler("lgamma_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LgammaBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LgammaBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->lgamma_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "lgamma", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::digamma(const Tensor & self) const { | |
profiler::RecordFunction profiler("digamma"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<DigammaBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<DigammaBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->digamma(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "digamma", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::digamma_(Tensor & self) const { | |
profiler::RecordFunction profiler("digamma_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<DigammaBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<DigammaBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->digamma_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "digamma", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::polygamma(int64_t n, const Tensor & self) const { | |
profiler::RecordFunction profiler("polygamma"); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<PolygammaBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<PolygammaBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->n = n; | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->polygamma(n, self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "polygamma", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("n"), n); | |
} | |
return result; | |
} | |
Tensor & VariableType::polygamma_(Tensor & self, int64_t n) const { | |
profiler::RecordFunction profiler("polygamma_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for polygamma_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->polygamma_(self_, n); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "polygamma", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("n"), n); | |
} | |
return self; | |
} | |
Tensor & VariableType::exp_(Tensor & self) const { | |
profiler::RecordFunction profiler("exp_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ExpBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ExpBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->exp_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "exp", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::exp(const Tensor & self) const { | |
profiler::RecordFunction profiler("exp"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ExpBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ExpBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->exp(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "exp", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor & VariableType::expm1_(Tensor & self) const { | |
profiler::RecordFunction profiler("expm1_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Expm1Backward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Expm1Backward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->expm1_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "expm1", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::expm1(const Tensor & self) const { | |
profiler::RecordFunction profiler("expm1"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<Expm1Backward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Expm1Backward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->expm1(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "expm1", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor & VariableType::cos_(Tensor & self) const { | |
profiler::RecordFunction profiler("cos_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<CosBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CosBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->cos_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cos", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::cos(const Tensor & self) const { | |
profiler::RecordFunction profiler("cos"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CosBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CosBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->cos(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cos", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::acos_(Tensor & self) const { | |
profiler::RecordFunction profiler("acos_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AcosBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AcosBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->acos_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "acos", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::acos(const Tensor & self) const { | |
profiler::RecordFunction profiler("acos"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AcosBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AcosBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->acos(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "acos", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::cosh_(Tensor & self) const { | |
profiler::RecordFunction profiler("cosh_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<CoshBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CoshBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->cosh_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cosh", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::cosh(const Tensor & self) const { | |
profiler::RecordFunction profiler("cosh"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CoshBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CoshBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->cosh(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cosh", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::sin_(Tensor & self) const { | |
profiler::RecordFunction profiler("sin_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SinBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->sin_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sin", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::sin(const Tensor & self) const { | |
profiler::RecordFunction profiler("sin"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SinBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->sin(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sin", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::asin_(Tensor & self) const { | |
profiler::RecordFunction profiler("asin_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AsinBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AsinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->asin_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "asin", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::asin(const Tensor & self) const { | |
profiler::RecordFunction profiler("asin"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AsinBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AsinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->asin(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "asin", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::sinh_(Tensor & self) const { | |
profiler::RecordFunction profiler("sinh_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SinhBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SinhBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->sinh_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sinh", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::sinh(const Tensor & self) const { | |
profiler::RecordFunction profiler("sinh"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SinhBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SinhBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->sinh(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sinh", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::tan_(Tensor & self) const { | |
profiler::RecordFunction profiler("tan_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TanBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TanBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->tan_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tan", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::tan(const Tensor & self) const { | |
profiler::RecordFunction profiler("tan"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TanBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TanBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->tan(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tan", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor & VariableType::atan_(Tensor & self) const { | |
profiler::RecordFunction profiler("atan_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AtanBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AtanBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->atan_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "atan", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::atan(const Tensor & self) const { | |
profiler::RecordFunction profiler("atan"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AtanBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AtanBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->atan(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "atan", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::tanh_(Tensor & self) const { | |
profiler::RecordFunction profiler("tanh_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TanhBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TanhBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->tanh_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tanh", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::tanh(const Tensor & self) const { | |
profiler::RecordFunction profiler("tanh"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TanhBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TanhBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->tanh(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tanh", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor & VariableType::erf_(Tensor & self) const { | |
profiler::RecordFunction profiler("erf_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ErfBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ErfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->erf_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "erf", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::erf(const Tensor & self) const { | |
profiler::RecordFunction profiler("erf"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ErfBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ErfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->erf(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "erf", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::erfinv_(Tensor & self) const { | |
profiler::RecordFunction profiler("erfinv_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ErfinvBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ErfinvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
} | |
baseType->erfinv_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "erfinv", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::erfinv(const Tensor & self) const { | |
profiler::RecordFunction profiler("erfinv"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ErfinvBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ErfinvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->erfinv(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "erfinv", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::sqrt_(Tensor & self) const { | |
profiler::RecordFunction profiler("sqrt_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SqrtBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SqrtBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->sqrt_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sqrt", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::sqrt(const Tensor & self) const { | |
profiler::RecordFunction profiler("sqrt"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SqrtBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SqrtBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->sqrt(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sqrt", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor & VariableType::rsqrt_(Tensor & self) const { | |
profiler::RecordFunction profiler("rsqrt_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RsqrtBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RsqrtBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->rsqrt_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "rsqrt", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::rsqrt(const Tensor & self) const { | |
profiler::RecordFunction profiler("rsqrt"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<RsqrtBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RsqrtBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->rsqrt(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "rsqrt", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
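// ceil/floor/round/trunc/frac build a backward node that stores nothing | |
// beyond next_functions; no input or output tensor needs to be saved for | |
// these rounding-style ops. | |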
Tensor & VariableType::ceil_(Tensor & self) const { | |
profiler::RecordFunction profiler("ceil_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<CeilBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CeilBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->ceil_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ceil", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::ceil(const Tensor & self) const { | |
profiler::RecordFunction profiler("ceil"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CeilBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CeilBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->ceil(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "ceil", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::floor_(Tensor & self) const { | |
profiler::RecordFunction profiler("floor_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<FloorBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<FloorBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->floor_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "floor", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::floor(const Tensor & self) const { | |
profiler::RecordFunction profiler("floor"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<FloorBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<FloorBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->floor(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "floor", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::round_(Tensor & self) const { | |
profiler::RecordFunction profiler("round_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RoundBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RoundBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->round_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "round", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::round(const Tensor & self) const { | |
profiler::RecordFunction profiler("round"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<RoundBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RoundBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->round(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "round", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::trunc_(Tensor & self) const { | |
profiler::RecordFunction profiler("trunc_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TruncBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TruncBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->trunc_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "trunc", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::trunc(const Tensor & self) const { | |
profiler::RecordFunction profiler("trunc"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TruncBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TruncBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->trunc(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "trunc", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::frac_(Tensor & self) const { | |
profiler::RecordFunction profiler("frac_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<FracBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<FracBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->frac_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "frac", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::frac(const Tensor & self) const { | |
profiler::RecordFunction profiler("frac"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<FracBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<FracBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->frac(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "frac", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
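// Statistical reductions: mean over a dim records self.sizes(), self.size(dim), | |
// dim and keepdim, presumably to rescale and broadcast the incoming gradient; | |
// the full mean saves self.sizes() plus self itself; var/std save self and the | |
// unbiased flag, and the dim-wise std also saves its result as a SavedVariable. | |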
Tensor VariableType::mean(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("mean"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MeanBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MeanBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->self_argsize_dim = self.size(dim); | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
auto result = as_variable(baseType->mean(self_, dim, keepdim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "mean", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
return result; | |
} | |
Tensor VariableType::mean(const Tensor & self) const { | |
profiler::RecordFunction profiler("mean"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MeanBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MeanBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->mean(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "mean", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::var(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const { | |
profiler::RecordFunction profiler("var"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<VarBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<VarBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->unbiased = unbiased; | |
grad_fn->keepdim = keepdim; | |
} | |
auto result = as_variable(baseType->var(self_, dim, unbiased, keepdim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "var", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("unbiased"), unbiased); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
return result; | |
} | |
Tensor VariableType::var(const Tensor & self, bool unbiased) const { | |
profiler::RecordFunction profiler("var"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<VarBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<VarBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->unbiased = unbiased; | |
} | |
auto result = as_variable(baseType->var(self_, unbiased)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "var", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("unbiased"), unbiased); | |
} | |
return result; | |
} | |
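// std (like norm) also stashes the forward result after tracing, since
// its derivative is expressed in terms of the computed output value.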
Tensor VariableType::std(const Tensor & self, int64_t dim, bool unbiased, bool keepdim) const { | |
profiler::RecordFunction profiler("std"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<StdBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<StdBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->unbiased = unbiased; | |
grad_fn->keepdim = keepdim; | |
} | |
auto result = as_variable(baseType->std(self_, dim, unbiased, keepdim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "std", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("unbiased"), unbiased); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor VariableType::std(const Tensor & self, bool unbiased) const { | |
profiler::RecordFunction profiler("std"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<StdBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<StdBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->unbiased = unbiased; | |
} | |
auto result = as_variable(baseType->std(self_, unbiased)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "std", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("unbiased"), unbiased); | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor VariableType::norm(const Tensor & self, Scalar p, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("norm"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<NormBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<NormBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->p = p; | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
auto result = as_variable(baseType->norm(self_, p, dim, keepdim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "norm", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor VariableType::norm(const Tensor & self, Scalar p) const { | |
profiler::RecordFunction profiler("norm"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<NormBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<NormBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->p = p; | |
} | |
auto result = as_variable(baseType->norm(self_, p)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "norm", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor VariableType::renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) const { | |
profiler::RecordFunction profiler("renorm"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<RenormBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RenormBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->p = p; | |
grad_fn->dim = dim; | |
grad_fn->maxnorm = maxnorm; | |
} | |
auto result = as_variable(baseType->renorm(self_, p, dim, maxnorm)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "renorm", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("maxnorm"), maxnorm); | |
} | |
return result; | |
} | |
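// In-place ops whose backward needs the original input (renorm_, atan2_,
// pow_, mul_, div_, clamp_ below) save a clone of self before the base
// call mutates it.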
Tensor & VariableType::renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) const { | |
profiler::RecordFunction profiler("renorm_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RenormBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RenormBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->p = p; | |
grad_fn->dim = dim; | |
grad_fn->maxnorm = maxnorm; | |
} | |
baseType->renorm_(self_, p, dim, maxnorm); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "renorm", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("maxnorm"), maxnorm); | |
} | |
return self; | |
} | |
Tensor VariableType::s_dist(const Tensor & self, const Tensor & other, Scalar p) const { | |
profiler::RecordFunction profiler("dist"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<DistBackward> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<DistBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
grad_fn->p = p; | |
} | |
auto result = as_variable(baseType->s_dist(self_, other_, p)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "dist", { self, other }, { result } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
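// reciprocal saves its output rather than its input: d(1/x)/dx = -1/x^2
// equals -result^2, so keeping the result avoids recomputing it in backward.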
Tensor VariableType::reciprocal(const Tensor & self) const { | |
profiler::RecordFunction profiler("reciprocal"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReciprocalBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ReciprocalBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->reciprocal(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reciprocal", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor & VariableType::reciprocal_(Tensor & self) const { | |
profiler::RecordFunction profiler("reciprocal_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ReciprocalBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ReciprocalBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->reciprocal_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reciprocal", { self }, { self } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::neg(const Tensor & self) const { | |
profiler::RecordFunction profiler("neg"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<NegBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<NegBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->neg(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "neg", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::neg_(Tensor & self) const { | |
profiler::RecordFunction profiler("neg_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<NegBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<NegBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->neg_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "neg", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::s_atan2(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("atan2"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<Atan2Backward> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<Atan2Backward>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto result = as_variable(baseType->s_atan2(self_, other_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "atan2", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::s_atan2_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("atan2_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<Atan2Backward> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<Atan2Backward>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
baseType->s_atan2_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "atan2", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::pow(const Tensor & self, Scalar exponent) const { | |
profiler::RecordFunction profiler("pow"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PowBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<PowBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->exponent = exponent; | |
} | |
auto result = as_variable(baseType->pow(self_, exponent)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "pow", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("exponent"), exponent); | |
} | |
return result; | |
} | |
Tensor VariableType::s_pow(const Tensor & self, const Tensor & exponent) const { | |
profiler::RecordFunction profiler("pow"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& exponent_ = unpack(exponent, "exponent", 1); | |
std::shared_ptr<PowBackward1> grad_fn; | |
if (compute_requires_grad({ self, exponent })) { | |
grad_fn = std::make_shared<PowBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, exponent }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->exponent_ = SavedVariable(exponent, false); | |
} | |
auto result = as_variable(baseType->s_pow(self_, exponent_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, exponent )) { | |
jit::Node *n = jit::tracer::recordTrace( "pow", { self, exponent }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::pow_(Tensor & self, Scalar exponent) const { | |
profiler::RecordFunction profiler("pow_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<PowBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<PowBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->exponent = exponent; | |
} | |
baseType->pow_(self_, exponent); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "pow", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("exponent"), exponent); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_pow_(Tensor & self, const Tensor & exponent) const { | |
profiler::RecordFunction profiler("pow_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& exponent_ = unpack(exponent, "exponent", 1); | |
check_inplace(self); | |
std::shared_ptr<PowBackward1> grad_fn; | |
if (compute_requires_grad({ self, exponent })) { | |
grad_fn = std::make_shared<PowBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, exponent }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->exponent_ = SavedVariable(exponent, false); | |
} | |
baseType->s_pow_(self_, exponent_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, exponent )) { | |
jit::Node *n = jit::tracer::recordTrace( "pow", { self, exponent }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::s_lerp(const Tensor & self, const Tensor & end, Scalar weight) const { | |
profiler::RecordFunction profiler("lerp"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& end_ = unpack(end, "end", 1); | |
std::shared_ptr<LerpBackward> grad_fn; | |
if (compute_requires_grad({ self, end })) { | |
grad_fn = std::make_shared<LerpBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, end }); | |
grad_fn->weight = weight; | |
} | |
auto result = as_variable(baseType->s_lerp(self_, end_, weight)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, end )) { | |
jit::Node *n = jit::tracer::recordTrace( "lerp", { self, end }, { result } ); | |
setattr(n, jit::stringToSymbol("weight"), weight); | |
} | |
return result; | |
} | |
Tensor & VariableType::s_lerp_(Tensor & self, const Tensor & end, Scalar weight) const { | |
profiler::RecordFunction profiler("lerp_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& end_ = unpack(end, "end", 1); | |
check_inplace(self); | |
std::shared_ptr<LerpBackward> grad_fn; | |
if (compute_requires_grad({ self, end })) { | |
grad_fn = std::make_shared<LerpBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, end }); | |
grad_fn->weight = weight; | |
} | |
baseType->s_lerp_(self_, end_, weight); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, end )) { | |
jit::Node *n = jit::tracer::recordTrace( "lerp", { self, end }, { self } ); | |
setattr(n, jit::stringToSymbol("weight"), weight); | |
} | |
return self; | |
} | |
Tensor VariableType::linspace(Scalar start, Scalar end, int64_t steps) const { | |
profiler::RecordFunction profiler("linspace"); | |
auto result = as_variable(baseType->linspace(start, end, steps)); | |
return result; | |
} | |
Tensor VariableType::logspace(Scalar start, Scalar end, int64_t steps) const { | |
profiler::RecordFunction profiler("logspace"); | |
auto result = as_variable(baseType->logspace(start, end, steps)); | |
return result; | |
} | |
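// linspace and logspace (and eye further down) are factory functions with
// no Variable inputs, so no grad_fn is attached and nothing is traced; the
// base result is simply wrapped as a variable.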
Tensor VariableType::histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) const { | |
profiler::RecordFunction profiler("histc"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<HistcBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<HistcBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->histc(self_, bins, min, max)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "histc", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("bins"), bins); | |
setattr(n, jit::stringToSymbol("min"), min); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return result; | |
} | |
Tensor & VariableType::zero_(Tensor & self) const { | |
profiler::RecordFunction profiler("zero_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ZeroBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ZeroBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->zero_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "zero", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::sum(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("sum"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SumBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SumBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
auto result = as_variable(baseType->sum(self_, dim, keepdim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sum", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
return result; | |
} | |
Tensor VariableType::sum(const Tensor & self) const { | |
profiler::RecordFunction profiler("sum"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SumBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SumBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto result = as_variable(baseType->sum(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sum", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
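// prod saves both the input and the result: away from zeros the per-element
// gradient is result / self, so the saved output spares recomputing the
// product in backward.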
Tensor VariableType::prod(const Tensor & self, int64_t dim, bool keepdim) const { | |
profiler::RecordFunction profiler("prod"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ProdBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ProdBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->keepdim = keepdim; | |
} | |
auto result = as_variable(baseType->prod(self_, dim, keepdim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "prod", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("keepdim"), keepdim); | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor VariableType::prod(const Tensor & self) const { | |
profiler::RecordFunction profiler("prod"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ProdBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ProdBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->prod(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "prod", { self }, { result } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result_ = SavedVariable(result, true); | |
} | |
return result; | |
} | |
Tensor VariableType::cumsum(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("cumsum"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CumsumBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CumsumBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
} | |
auto result = as_variable(baseType->cumsum(self_, dim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cumsum", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
Tensor VariableType::cumprod(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("cumprod"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<CumprodBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CumprodBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
} | |
auto result = as_variable(baseType->cumprod(self_, dim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cumprod", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
Tensor VariableType::sign(const Tensor & self) const { | |
profiler::RecordFunction profiler("sign"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SignBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SignBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->sign(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sign", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::sign_(Tensor & self) const { | |
profiler::RecordFunction profiler("sign_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SignBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SignBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->sign_(self_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sign", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::trace(const Tensor & self) const { | |
profiler::RecordFunction profiler("trace"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TraceBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TraceBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto result = as_variable(baseType->trace(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "trace", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
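// The binary arithmetic ops below come in several generated flavours: a
// Scalar overload, a tensor-tensor overload (s_-prefixed), a SparseTensor
// overload for add, and in-place versions of each; alpha scales the second
// operand of add/sub.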
Tensor VariableType::add(const Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AddBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AddBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->add(self_, other, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
Tensor VariableType::s_add(const Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<AddBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<AddBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->alpha = alpha; | |
} | |
auto result = as_variable(baseType->s_add(self_, other_, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self, other }, { result } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
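// The SparseTensor overload of add has no derivative formula, so its
// backward node is an Error carrying the message below; backpropagating
// through this result would raise that error.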
Tensor VariableType::add(const Tensor & self, SparseTensor other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add"); | |
auto& self_ = unpack(self, "self", 0); | |
auto other_ = unpack(other, "other", 1); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for add is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->add(self_, other_, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
Tensor & VariableType::add_(Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AddBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AddBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->add_(self_, other, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_add_(Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<AddBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<AddBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->alpha = alpha; | |
} | |
baseType->s_add_(self_, other_, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self, other }, { self } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor & VariableType::add_(Tensor & self, SparseTensor other, Scalar alpha) const { | |
profiler::RecordFunction profiler("add_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for add_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->add_(self_, other_, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "add", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::sub(const Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SubBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SubBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->sub(self_, other, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sub", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
Tensor VariableType::s_sub(const Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<SubBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<SubBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->alpha = alpha; | |
} | |
auto result = as_variable(baseType->s_sub(self_, other_, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "sub", { self, other }, { result } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
Tensor & VariableType::sub_(Tensor & self, Scalar other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SubBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SubBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->sub_(self_, other, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "sub", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_sub_(Tensor & self, const Tensor & other, Scalar alpha) const { | |
profiler::RecordFunction profiler("sub_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<SubBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<SubBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->alpha = alpha; | |
} | |
baseType->s_sub_(self_, other_, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "sub", { self, other }, { self } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::mul(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("mul"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MulBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MulBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->other = other; | |
} | |
auto result = as_variable(baseType->mul(self_, other)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "mul", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_mul(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("mul"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<MulBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<MulBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto result = as_variable(baseType->s_mul(self_, other_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "mul", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::mul_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("mul_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<MulBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MulBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->other = other; | |
} | |
baseType->mul_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "mul", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_mul_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("mul_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<MulBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<MulBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
baseType->s_mul_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "mul", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::div(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("div"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<DivBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<DivBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->other = other; | |
} | |
auto result = as_variable(baseType->div(self_, other)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "div", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_div(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("div"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<DivBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<DivBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto result = as_variable(baseType->s_div(self_, other_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "div", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::div_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("div_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<DivBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<DivBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->other = other; | |
} | |
baseType->div_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "div", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_div_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("div_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<DivBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<DivBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
baseType->s_div_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "div", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::fmod(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("fmod"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<FmodBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<FmodBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->fmod(self_, other)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "fmod", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_fmod(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("fmod"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<FmodBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<FmodBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto result = as_variable(baseType->s_fmod(self_, other_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "fmod", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::fmod_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("fmod_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<FmodBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<FmodBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->fmod_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "fmod", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_fmod_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("fmod_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
std::shared_ptr<FmodBackward1> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<FmodBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
baseType->s_fmod_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "fmod", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
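// remainder does not implement a derivative with respect to the divisor,
// so check_no_requires_grad rejects a grad-requiring `other` and only self
// feeds next_functions.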
Tensor VariableType::remainder(const Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("remainder"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<RemainderBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RemainderBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->remainder(self_, other)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "remainder", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return result; | |
} | |
Tensor VariableType::s_remainder(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("remainder"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_no_requires_grad(other, "other"); | |
std::shared_ptr<RemainderBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RemainderBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->s_remainder(self_, other_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "remainder", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::remainder_(Tensor & self, Scalar other) const { | |
profiler::RecordFunction profiler("remainder_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RemainderBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RemainderBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->remainder_(self_, other); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "remainder", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("other"), other); | |
} | |
return self; | |
} | |
Tensor & VariableType::s_remainder_(Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("remainder_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
check_inplace(self); | |
check_no_requires_grad(other, "other"); | |
std::shared_ptr<RemainderBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RemainderBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->s_remainder_(self_, other_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "remainder", { self, other }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
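// clamp and its one-sided variants save the input together with the bounds;
// the backward zeroes the gradient wherever the input fell outside the
// clamped range.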
Tensor VariableType::clamp(const Tensor & self, Scalar min, Scalar max) const { | |
profiler::RecordFunction profiler("clamp"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ClampBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ClampBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->min = min; | |
grad_fn->max = max; | |
} | |
auto result = as_variable(baseType->clamp(self_, min, max)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("min"), min); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return result; | |
} | |
Tensor & VariableType::clamp_(Tensor & self, Scalar min, Scalar max) const { | |
profiler::RecordFunction profiler("clamp_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ClampBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ClampBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->min = min; | |
grad_fn->max = max; | |
} | |
baseType->clamp_(self_, min, max); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("min"), min); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return self; | |
} | |
Tensor VariableType::clamp_min(const Tensor & self, Scalar min) const { | |
profiler::RecordFunction profiler("clamp_min"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ClampMinBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ClampMinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->min = min; | |
} | |
auto result = as_variable(baseType->clamp_min(self_, min)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp_min", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("min"), min); | |
} | |
return result; | |
} | |
Tensor & VariableType::clamp_min_(Tensor & self, Scalar min) const { | |
profiler::RecordFunction profiler("clamp_min_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ClampMinBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ClampMinBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->min = min; | |
} | |
baseType->clamp_min_(self_, min); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp_min", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("min"), min); | |
} | |
return self; | |
} | |
Tensor VariableType::clamp_max(const Tensor & self, Scalar max) const { | |
profiler::RecordFunction profiler("clamp_max"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ClampMaxBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ClampMaxBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->max = max; | |
} | |
auto result = as_variable(baseType->clamp_max(self_, max)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp_max", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return result; | |
} | |
Tensor & VariableType::clamp_max_(Tensor & self, Scalar max) const { | |
profiler::RecordFunction profiler("clamp_max_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ClampMaxBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ClampMaxBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self.clone(), false); | |
grad_fn->max = max; | |
} | |
baseType->clamp_max_(self_, max); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "clamp_max", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("max"), max); | |
} | |
return self; | |
} | |
Tensor VariableType::dot(const Tensor & self, const Tensor & tensor) const { | |
profiler::RecordFunction profiler("dot"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor_ = unpack(tensor, "tensor", 1); | |
std::shared_ptr<DotBackward> grad_fn; | |
if (compute_requires_grad({ self, tensor })) { | |
grad_fn = std::make_shared<DotBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor }); | |
grad_fn->tensor_ = SavedVariable(tensor, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->dot(self_, tensor_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, tensor )) { | |
jit::Node *n = jit::tracer::recordTrace( "dot", { self, tensor }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::tril(const Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("tril"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TrilBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TrilBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
auto result = as_variable(baseType->tril(self_, diagonal)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tril", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return result; | |
} | |
Tensor & VariableType::tril_(Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("tril_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TrilBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TrilBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
baseType->tril_(self_, diagonal); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "tril", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return self; | |
} | |
Tensor VariableType::triu(const Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("triu"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<TriuBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TriuBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
auto result = as_variable(baseType->triu(self_, diagonal)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "triu", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return result; | |
} | |
Tensor & VariableType::triu_(Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("triu_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<TriuBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<TriuBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
baseType->triu_(self_, diagonal); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "triu", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return self; | |
} | |
Tensor VariableType::cross(const Tensor & self, const Tensor & other, int64_t dim) const { | |
profiler::RecordFunction profiler("cross"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& other_ = unpack(other, "other", 1); | |
std::shared_ptr<CrossBackward> grad_fn; | |
if (compute_requires_grad({ self, other })) { | |
grad_fn = std::make_shared<CrossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, other }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->other_ = SavedVariable(other, false); | |
} | |
auto result = as_variable(baseType->cross(self_, other_, dim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "cross", { self, other }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
Tensor VariableType::eye(int64_t n, int64_t m) const { | |
profiler::RecordFunction profiler("eye"); | |
auto result = as_variable(baseType->eye(n, m)); | |
return result; | |
} | |
Tensor VariableType::diag(const Tensor & self, int64_t diagonal) const { | |
profiler::RecordFunction profiler("diag"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<DiagBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<DiagBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->diagonal = diagonal; | |
} | |
auto result = as_variable(baseType->diag(self_, diagonal)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "diag", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("diagonal"), diagonal); | |
} | |
return result; | |
} | |
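// addmm computes beta * self + alpha * (mat1 @ mat2); the backward node
// keeps both matrices and their sizes so the matrix-product gradients can
// be formed without re-deriving shapes.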
Tensor VariableType::s_addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat1_ = unpack(mat1, "mat1", 1); | |
auto& mat2_ = unpack(mat2, "mat2", 2); | |
std::shared_ptr<AddmmBackward> grad_fn; | |
if (compute_requires_grad({ self, mat1, mat2 })) { | |
grad_fn = std::make_shared<AddmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat1, mat2 }); | |
grad_fn->mat1_sizes = mat1.sizes(); | |
grad_fn->mat1_ = SavedVariable(mat1, false); | |
grad_fn->mat2_ = SavedVariable(mat2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->mat2_sizes = mat2.sizes(); | |
grad_fn->beta = beta; | |
} | |
auto result = as_variable(baseType->s_addmm(self_, mat1_, mat2_, beta, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, mat1, mat2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addmm", { self, mat1, mat2 }, { result } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
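// Assumption: the s_ prefix on s_addmm/s_addmv/s_addr/... marks the
// non-broadcasting ("same-shape") variants, with the broadcasting wrappers
// generated elsewhere. The fields saved above (both matrices, their sizes,
// alpha, beta) are what the addmm derivative formula needs.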
Tensor & VariableType::addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmm_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat1_ = unpack(mat1, "mat1", 1); | |
auto& mat2_ = unpack(mat2, "mat2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddmmBackward> grad_fn; | |
if (compute_requires_grad({ self, mat1, mat2 })) { | |
grad_fn = std::make_shared<AddmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat1, mat2 }); | |
grad_fn->mat1_sizes = mat1.sizes(); | |
grad_fn->mat1_ = SavedVariable(mat1, false); | |
grad_fn->mat2_ = SavedVariable(mat2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->mat2_sizes = mat2.sizes(); | |
grad_fn->beta = beta; | |
} | |
baseType->addmm_(self_, mat1_, mat2_, beta, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, mat1, mat2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addmm", { self, mat1, mat2 }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::s_addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmv"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat_ = unpack(mat, "mat", 1); | |
auto& vec_ = unpack(vec, "vec", 2); | |
std::shared_ptr<AddmvBackward> grad_fn; | |
if (compute_requires_grad({ self, mat, vec })) { | |
grad_fn = std::make_shared<AddmvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat, vec }); | |
grad_fn->vec_ = SavedVariable(vec, false); | |
grad_fn->alpha = alpha; | |
grad_fn->beta = beta; | |
grad_fn->mat_ = SavedVariable(mat, false); | |
} | |
auto result = as_variable(baseType->s_addmv(self_, mat_, vec_, beta, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, mat, vec )) { | |
jit::Node *n = jit::tracer::recordTrace( "addmv", { self, mat, vec }, { result } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
Tensor & VariableType::addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addmv_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat_ = unpack(mat, "mat", 1); | |
auto& vec_ = unpack(vec, "vec", 2); | |
check_inplace(self); | |
std::shared_ptr<AddmvBackward> grad_fn; | |
if (compute_requires_grad({ self, mat, vec })) { | |
grad_fn = std::make_shared<AddmvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat, vec }); | |
grad_fn->vec_ = SavedVariable(vec, false); | |
grad_fn->alpha = alpha; | |
grad_fn->beta = beta; | |
grad_fn->mat_ = SavedVariable(mat, false); | |
} | |
baseType->addmv_(self_, mat_, vec_, beta, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, mat, vec )) { | |
jit::Node *n = jit::tracer::recordTrace( "addmv", { self, mat, vec }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::s_addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addr"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& vec1_ = unpack(vec1, "vec1", 1); | |
auto& vec2_ = unpack(vec2, "vec2", 2); | |
std::shared_ptr<AddrBackward> grad_fn; | |
if (compute_requires_grad({ self, vec1, vec2 })) { | |
grad_fn = std::make_shared<AddrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, vec1, vec2 }); | |
grad_fn->beta = beta; | |
grad_fn->vec2_ = SavedVariable(vec2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->vec1_ = SavedVariable(vec1, false); | |
} | |
auto result = as_variable(baseType->s_addr(self_, vec1_, vec2_, beta, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, vec1, vec2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addr", { self, vec1, vec2 }, { result } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
Tensor & VariableType::addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addr_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& vec1_ = unpack(vec1, "vec1", 1); | |
auto& vec2_ = unpack(vec2, "vec2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddrBackward> grad_fn; | |
if (compute_requires_grad({ self, vec1, vec2 })) { | |
grad_fn = std::make_shared<AddrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, vec1, vec2 }); | |
grad_fn->beta = beta; | |
grad_fn->vec2_ = SavedVariable(vec2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->vec1_ = SavedVariable(vec1, false); | |
} | |
baseType->addr_(self_, vec1_, vec2_, beta, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, vec1, vec2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addr", { self, vec1, vec2 }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::ger(const Tensor & self, const Tensor & vec2) const { | |
profiler::RecordFunction profiler("ger"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& vec2_ = unpack(vec2, "vec2", 1); | |
std::shared_ptr<GerBackward> grad_fn; | |
if (compute_requires_grad({ self, vec2 })) { | |
grad_fn = std::make_shared<GerBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, vec2 }); | |
grad_fn->vec2_ = SavedVariable(vec2, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->ger(self_, vec2_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, vec2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "ger", { self, vec2 }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::mv(const Tensor & self, const Tensor & vec) const { | |
profiler::RecordFunction profiler("mv"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& vec_ = unpack(vec, "vec", 1); | |
std::shared_ptr<MvBackward> grad_fn; | |
if (compute_requires_grad({ self, vec })) { | |
grad_fn = std::make_shared<MvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, vec }); | |
grad_fn->vec_ = SavedVariable(vec, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto result = as_variable(baseType->mv(self_, vec_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, vec )) { | |
jit::Node *n = jit::tracer::recordTrace( "mv", { self, vec }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::mm(const Tensor & self, const Tensor & mat2) const { | |
profiler::RecordFunction profiler("mm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat2_ = unpack(mat2, "mat2", 1); | |
std::shared_ptr<MmBackward> grad_fn; | |
if (compute_requires_grad({ self, mat2 })) { | |
grad_fn = std::make_shared<MmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat2 }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->mat2_sizes = mat2.sizes(); | |
grad_fn->mat2_ = SavedVariable(mat2, false); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto result = as_variable(baseType->mm(self_, mat2_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, mat2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "mm", { self, mat2 }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::bmm(const Tensor & self, const Tensor & mat2) const { | |
profiler::RecordFunction profiler("bmm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& mat2_ = unpack(mat2, "mat2", 1); | |
std::shared_ptr<BmmBackward> grad_fn; | |
if (compute_requires_grad({ self, mat2 })) { | |
grad_fn = std::make_shared<BmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, mat2 }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->mat2_ = SavedVariable(mat2, false); | |
} | |
auto result = as_variable(baseType->bmm(self_, mat2_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, mat2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "bmm", { self, mat2 }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
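// mm saves both operands plus their sizes for its backward, while bmm needs
// only the saved tensors; which fields are captured is determined by each
// op's derivative formula.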
Tensor VariableType::s_addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addbmm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& batch1_ = unpack(batch1, "batch1", 1); | |
auto& batch2_ = unpack(batch2, "batch2", 2); | |
std::shared_ptr<AddbmmBackward> grad_fn; | |
if (compute_requires_grad({ self, batch1, batch2 })) { | |
grad_fn = std::make_shared<AddbmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
grad_fn->batch1_argsize_0 = batch1.size(0); | |
grad_fn->batch1_argsize_1 = batch1.size(1); | |
grad_fn->batch2_argsize_2 = batch2.size(2); | |
grad_fn->batch2_ = SavedVariable(batch2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->batch1_ = SavedVariable(batch1, false); | |
grad_fn->beta = beta; | |
} | |
auto result = as_variable(baseType->s_addbmm(self_, batch1_, batch2_, beta, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, batch1, batch2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addbmm", { self, batch1, batch2 }, { result } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
Tensor & VariableType::addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("addbmm_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& batch1_ = unpack(batch1, "batch1", 1); | |
auto& batch2_ = unpack(batch2, "batch2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddbmmBackward> grad_fn; | |
if (compute_requires_grad({ self, batch1, batch2 })) { | |
grad_fn = std::make_shared<AddbmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
grad_fn->batch1_argsize_0 = batch1.size(0); | |
grad_fn->batch1_argsize_1 = batch1.size(1); | |
grad_fn->batch2_argsize_2 = batch2.size(2); | |
grad_fn->batch2_ = SavedVariable(batch2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->batch1_ = SavedVariable(batch1, false); | |
grad_fn->beta = beta; | |
} | |
baseType->addbmm_(self_, batch1_, batch2_, beta, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, batch1, batch2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addbmm", { self, batch1, batch2 }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::s_baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("baddbmm"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& batch1_ = unpack(batch1, "batch1", 1); | |
auto& batch2_ = unpack(batch2, "batch2", 2); | |
std::shared_ptr<BaddbmmBackward> grad_fn; | |
if (compute_requires_grad({ self, batch1, batch2 })) { | |
grad_fn = std::make_shared<BaddbmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
grad_fn->batch2_ = SavedVariable(batch2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->batch1_ = SavedVariable(batch1, false); | |
grad_fn->beta = beta; | |
} | |
auto result = as_variable(baseType->s_baddbmm(self_, batch1_, batch2_, beta, alpha)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, batch1, batch2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "baddbmm", { self, batch1, batch2 }, { result } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return result; | |
} | |
Tensor & VariableType::baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) const { | |
profiler::RecordFunction profiler("baddbmm_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& batch1_ = unpack(batch1, "batch1", 1); | |
auto& batch2_ = unpack(batch2, "batch2", 2); | |
check_inplace(self); | |
std::shared_ptr<BaddbmmBackward> grad_fn; | |
if (compute_requires_grad({ self, batch1, batch2 })) { | |
grad_fn = std::make_shared<BaddbmmBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, batch1, batch2 }); | |
grad_fn->batch2_ = SavedVariable(batch2, false); | |
grad_fn->alpha = alpha; | |
grad_fn->batch1_ = SavedVariable(batch1, false); | |
grad_fn->beta = beta; | |
} | |
baseType->baddbmm_(self_, batch1_, batch2_, beta, alpha); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, batch1, batch2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "baddbmm", { self, batch1, batch2 }, { self } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
} | |
return self; | |
} | |
Tensor VariableType::s_addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcmul"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor1_ = unpack(tensor1, "tensor1", 1); | |
auto& tensor2_ = unpack(tensor2, "tensor2", 2); | |
std::shared_ptr<AddcmulBackward> grad_fn; | |
if (compute_requires_grad({ self, tensor1, tensor2 })) { | |
grad_fn = std::make_shared<AddcmulBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
grad_fn->value = value; | |
grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
} | |
auto result = as_variable(baseType->s_addcmul(self_, tensor1_, tensor2_, value)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, tensor1, tensor2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addcmul", { self, tensor1, tensor2 }, { result } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return result; | |
} | |
Tensor & VariableType::s_addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcmul_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor1_ = unpack(tensor1, "tensor1", 1); | |
auto& tensor2_ = unpack(tensor2, "tensor2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddcmulBackward> grad_fn; | |
if (compute_requires_grad({ self, tensor1, tensor2 })) { | |
grad_fn = std::make_shared<AddcmulBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
grad_fn->value = value; | |
grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
} | |
baseType->s_addcmul_(self_, tensor1_, tensor2_, value); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, tensor1, tensor2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addcmul", { self, tensor1, tensor2 }, { self } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor VariableType::s_addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcdiv"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor1_ = unpack(tensor1, "tensor1", 1); | |
auto& tensor2_ = unpack(tensor2, "tensor2", 2); | |
std::shared_ptr<AddcdivBackward> grad_fn; | |
if (compute_requires_grad({ self, tensor1, tensor2 })) { | |
grad_fn = std::make_shared<AddcdivBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
grad_fn->value = value; | |
grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
} | |
auto result = as_variable(baseType->s_addcdiv(self_, tensor1_, tensor2_, value)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, tensor1, tensor2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addcdiv", { self, tensor1, tensor2 }, { result } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return result; | |
} | |
Tensor & VariableType::s_addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const { | |
profiler::RecordFunction profiler("addcdiv_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& tensor1_ = unpack(tensor1, "tensor1", 1); | |
auto& tensor2_ = unpack(tensor2, "tensor2", 2); | |
check_inplace(self); | |
std::shared_ptr<AddcdivBackward> grad_fn; | |
if (compute_requires_grad({ self, tensor1, tensor2 })) { | |
grad_fn = std::make_shared<AddcdivBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, tensor1, tensor2 }); | |
grad_fn->tensor2_ = SavedVariable(tensor2, false); | |
grad_fn->value = value; | |
grad_fn->tensor1_ = SavedVariable(tensor1, false); | |
} | |
baseType->s_addcdiv_(self_, tensor1_, tensor2_, value); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, tensor1, tensor2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "addcdiv", { self, tensor1, tensor2 }, { self } ); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
std::tuple<Tensor,Tensor> VariableType::gesv(const Tensor & self, const Tensor & A) const { | |
profiler::RecordFunction profiler("gesv"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& A_ = unpack(A, "A", 1); | |
std::shared_ptr<GesvBackward> grad_fn; | |
if (compute_requires_grad({ self, A })) { | |
grad_fn = std::make_shared<GesvBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, A }); | |
grad_fn->A_ = SavedVariable(A, false); | |
} | |
Tensor solution, lu; | |
std::tie(solution, lu) = as_variable(baseType->gesv(self_, A_)); | |
set_history(solution, grad_fn); | |
if (jit::tracer::isTracing( self, A )) { | |
jit::Node *n = jit::tracer::recordTrace( "gesv", { self, A }, { solution, lu } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->solution_ = SavedVariable(solution, true); | |
} | |
return std::make_tuple(std::move(solution), std::move(lu)); | |
} | |
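// When a backward formula also needs one of the op's *outputs* (here gesv's
// solution), that output is stashed into the grad_fn after set_history, with
// the second SavedVariable argument set to true to flag it as an output of
// this node.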
std::tuple<Tensor,Tensor> VariableType::gels(const Tensor & self, const Tensor & A) const { | |
profiler::RecordFunction profiler("gels"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& A_ = unpack(A, "A", 1); | |
std::shared_ptr<GelsBackward> grad_fn; | |
if (compute_requires_grad({ self, A })) { | |
grad_fn = std::make_shared<GelsBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, A }); | |
} | |
Tensor res1, res2; | |
std::tie(res1, res2) = as_variable(baseType->gels(self_, A_)); | |
set_history({ res1, res2 }, grad_fn); | |
if (jit::tracer::isTracing( self, A )) { | |
jit::Node *n = jit::tracer::recordTrace( "gels", { self, A }, { res1, res2 } ); | |
(void)n; | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) const { | |
profiler::RecordFunction profiler("trtrs"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& A_ = unpack(A, "A", 1); | |
std::shared_ptr<TrtrsBackward> grad_fn; | |
if (compute_requires_grad({ self, A })) { | |
grad_fn = std::make_shared<TrtrsBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, A }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->A_ = SavedVariable(A, false); | |
grad_fn->upper = upper; | |
grad_fn->transpose = transpose; | |
grad_fn->unitriangular = unitriangular; | |
} | |
Tensor res1, res2; | |
std::tie(res1, res2) = as_variable(baseType->trtrs(self_, A_, upper, transpose, unitriangular)); | |
set_history({ res1, res2 }, grad_fn); | |
if (jit::tracer::isTracing( self, A )) { | |
jit::Node *n = jit::tracer::recordTrace( "trtrs", { self, A }, { res1, res2 } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
setattr(n, jit::stringToSymbol("transpose"), transpose); | |
setattr(n, jit::stringToSymbol("unitriangular"), unitriangular); | |
} | |
if (grad_fn) { | |
grad_fn->res1_ = SavedVariable(res1, true); | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::symeig(const Tensor & self, bool eigenvectors, bool upper) const { | |
profiler::RecordFunction profiler("symeig"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SymeigBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SymeigBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
Tensor res1, res2; | |
std::tie(res1, res2) = as_variable(baseType->symeig(self_, eigenvectors, upper)); | |
set_history({ res1, res2 }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "symeig", { self }, { res1, res2 } ); | |
setattr(n, jit::stringToSymbol("eigenvectors"), eigenvectors); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::eig(const Tensor & self, bool eigenvectors) const { | |
profiler::RecordFunction profiler("eig"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<EigBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<EigBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
Tensor res1, res2; | |
std::tie(res1, res2) = as_variable(baseType->eig(self_, eigenvectors)); | |
set_history({ res1, res2 }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "eig", { self }, { res1, res2 } ); | |
setattr(n, jit::stringToSymbol("eigenvectors"), eigenvectors); | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::svd(const Tensor & self, bool some) const { | |
profiler::RecordFunction profiler("svd"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SvdBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SvdBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->some = some; | |
} | |
Tensor res1, res2, res3; | |
std::tie(res1, res2, res3) = as_variable(baseType->svd(self_, some)); | |
set_history({ res1, res2, res3 }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "svd", { self }, { res1, res2, res3 } ); | |
setattr(n, jit::stringToSymbol("some"), some); | |
} | |
if (grad_fn) { | |
grad_fn->res1_ = SavedVariable(res1, true); | |
grad_fn->res2_ = SavedVariable(res2, true); | |
grad_fn->res3_ = SavedVariable(res3, true); | |
} | |
return std::make_tuple(std::move(res1), std::move(res2), std::move(res3)); | |
} | |
Tensor VariableType::inverse(const Tensor & self) const { | |
profiler::RecordFunction profiler("inverse"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<InverseBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<InverseBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto output = as_variable(baseType->inverse(self_)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "inverse", { self }, { output } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return output; | |
} | |
Tensor VariableType::potrf(const Tensor & self, bool upper) const { | |
profiler::RecordFunction profiler("potrf"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PotrfBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<PotrfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->upper = upper; | |
} | |
auto output = as_variable(baseType->potrf(self_, upper)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "potrf", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return output; | |
} | |
Tensor VariableType::potrs(const Tensor & self, const Tensor & input2, bool upper) const { | |
profiler::RecordFunction profiler("potrs"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& input2_ = unpack(input2, "input2", 1); | |
std::shared_ptr<PotrsBackward> grad_fn; | |
if (compute_requires_grad({ self, input2 })) { | |
grad_fn = std::make_shared<PotrsBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, input2 }); | |
} | |
auto result = as_variable(baseType->potrs(self_, input2_, upper)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, input2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "potrs", { self, input2 }, { result } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
} | |
return result; | |
} | |
Tensor VariableType::potri(const Tensor & self, bool upper) const { | |
profiler::RecordFunction profiler("potri"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PotriBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<PotriBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto output = as_variable(baseType->potri(self_, upper)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "potri", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor> VariableType::pstrf(const Tensor & self, bool upper, Scalar tol) const { | |
profiler::RecordFunction profiler("pstrf"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PstrfBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<PstrfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
Tensor res1, res2; | |
std::tie(res1, res2) = as_variable(baseType->pstrf(self_, upper, tol)); | |
set_history({ res1, res2 }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "pstrf", { self }, { res1, res2 } ); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
setattr(n, jit::stringToSymbol("tol"), tol); | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::qr(const Tensor & self) const { | |
profiler::RecordFunction profiler("qr"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<QrBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<QrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
Tensor res1, res2; | |
std::tie(res1, res2) = as_variable(baseType->qr(self_)); | |
set_history({ res1, res2 }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "qr", { self }, { res1, res2 } ); | |
(void)n; | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::geqrf(const Tensor & self) const { | |
profiler::RecordFunction profiler("geqrf"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<GeqrfBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<GeqrfBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
Tensor res1, res2; | |
std::tie(res1, res2) = as_variable(baseType->geqrf(self_)); | |
set_history({ res1, res2 }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "geqrf", { self }, { res1, res2 } ); | |
(void)n; | |
} | |
return std::make_tuple(std::move(res1), std::move(res2)); | |
} | |
Tensor VariableType::orgqr(const Tensor & self, const Tensor & input2) const { | |
profiler::RecordFunction profiler("orgqr"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& input2_ = unpack(input2, "input2", 1); | |
std::shared_ptr<OrgqrBackward> grad_fn; | |
if (compute_requires_grad({ self, input2 })) { | |
grad_fn = std::make_shared<OrgqrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, input2 }); | |
} | |
auto result = as_variable(baseType->orgqr(self_, input2_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, input2 )) { | |
jit::Node *n = jit::tracer::recordTrace( "orgqr", { self, input2 }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) const { | |
profiler::RecordFunction profiler("ormqr"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& input2_ = unpack(input2, "input2", 1); | |
auto& input3_ = unpack(input3, "input3", 2); | |
std::shared_ptr<OrmqrBackward> grad_fn; | |
if (compute_requires_grad({ self, input2, input3 })) { | |
grad_fn = std::make_shared<OrmqrBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, input2, input3 }); | |
} | |
auto result = as_variable(baseType->ormqr(self_, input2_, input3_, left, transpose)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, input2, input3 )) { | |
jit::Node *n = jit::tracer::recordTrace( "ormqr", { self, input2, input3 }, { result } ); | |
setattr(n, jit::stringToSymbol("left"), left); | |
setattr(n, jit::stringToSymbol("transpose"), transpose); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor> VariableType::btrifact(const Tensor & self, bool pivot) const { | |
profiler::RecordFunction profiler("btrifact"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<BtrifactBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<BtrifactBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
Tensor result, pivots; | |
std::tie(result, pivots) = as_variable(baseType->btrifact(self_, pivot)); | |
set_history({ result, pivots }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "btrifact", { self }, { result, pivots } ); | |
setattr(n, jit::stringToSymbol("pivot"), pivot); | |
} | |
return std::make_tuple(std::move(result), std::move(pivots)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::btrifact_with_info(const Tensor & self, bool pivot) const { | |
profiler::RecordFunction profiler("btrifact_with_info"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<BtrifactWithInfoBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<BtrifactWithInfoBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
Tensor result, pivots, info; | |
std::tie(result, pivots, info) = as_variable(baseType->btrifact_with_info(self_, pivot)); | |
set_history({ result, pivots, info }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "btrifact_with_info", { self }, { result, pivots, info } ); | |
setattr(n, jit::stringToSymbol("pivot"), pivot); | |
} | |
return std::make_tuple(std::move(result), std::move(pivots), std::move(info)); | |
} | |
Tensor VariableType::btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) const { | |
profiler::RecordFunction profiler("btrisolve"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& LU_data_ = unpack(LU_data, "LU_data", 1); | |
auto& LU_pivots_ = unpack(LU_pivots, "LU_pivots", 2); | |
check_no_requires_grad(LU_data, "LU_data"); | |
check_no_requires_grad(LU_pivots, "LU_pivots"); | |
std::shared_ptr<BtrisolveBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<BtrisolveBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->btrisolve(self_, LU_data_, LU_pivots_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, LU_data, LU_pivots )) { | |
jit::Node *n = jit::tracer::recordTrace( "btrisolve", { self, LU_data, LU_pivots }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
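// The RNG ops below still allocate their <Op>Backward nodes when self
// requires grad, but they save no tensors and record no trace node;
// randperm, rand, randn and multinomial skip the grad_fn machinery entirely.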
Tensor VariableType::randperm(int64_t n, Generator * generator) const { | |
profiler::RecordFunction profiler("randperm"); | |
auto result = as_variable(baseType->randperm(n, generator)); | |
return result; | |
} | |
Tensor & VariableType::random_(Tensor & self, int64_t from, int64_t to, Generator * generator) const { | |
profiler::RecordFunction profiler("random_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RandomBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RandomBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->random_(self_, from, to, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor & VariableType::random_(Tensor & self, int64_t to, Generator * generator) const { | |
profiler::RecordFunction profiler("random_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RandomBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RandomBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->random_(self_, to, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor & VariableType::random_(Tensor & self, Generator * generator) const { | |
profiler::RecordFunction profiler("random_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<RandomBackward2> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RandomBackward2>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->random_(self_, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor VariableType::multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) const { | |
profiler::RecordFunction profiler("multinomial"); | |
auto& self_ = unpack(self, "self", 0); | |
auto result = as_variable(baseType->multinomial(self_, num_samples, replacement, generator)); | |
return result; | |
} | |
Tensor & VariableType::uniform_(Tensor & self, double from, double to, Generator * generator) const { | |
profiler::RecordFunction profiler("uniform_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<UniformBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UniformBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->uniform_(self_, from, to, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor VariableType::normal(const Tensor & mean, double std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal"); | |
auto& mean_ = unpack(mean, "mean", 0); | |
std::shared_ptr<NormalBackward1> grad_fn; | |
if (compute_requires_grad({ mean })) { | |
grad_fn = std::make_shared<NormalBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ mean }); | |
grad_fn->mean_sizes = mean.sizes(); | |
} | |
auto output = as_variable(baseType->normal(mean_, std, generator)); | |
set_history(output, grad_fn); | |
return output; | |
} | |
Tensor VariableType::normal(double mean, const Tensor & std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal"); | |
auto& std_ = unpack(std, "std", 1); | |
std::shared_ptr<NormalBackward2> grad_fn; | |
if (compute_requires_grad({ std })) { | |
grad_fn = std::make_shared<NormalBackward2>(); | |
grad_fn->next_functions = compute_next_functions({ std }); | |
grad_fn->std_sizes = std.sizes(); | |
} | |
auto output = as_variable(baseType->normal(mean, std_, generator)); | |
set_history(output, grad_fn); | |
return output; | |
} | |
Tensor VariableType::normal(const Tensor & mean, const Tensor & std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal"); | |
auto& mean_ = unpack(mean, "mean", 0); | |
auto& std_ = unpack(std, "std", 1); | |
std::shared_ptr<NormalBackward3> grad_fn; | |
if (compute_requires_grad({ mean, std })) { | |
grad_fn = std::make_shared<NormalBackward3>(); | |
grad_fn->next_functions = compute_next_functions({ mean, std }); | |
grad_fn->mean_sizes = mean.sizes(); | |
grad_fn->std_sizes = std.sizes(); | |
} | |
auto output = as_variable(baseType->normal(mean_, std_, generator)); | |
set_history(output, grad_fn); | |
return output; | |
} | |
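// The three out-of-place normal overloads above capture only the sizes of
// their Tensor arguments for the backward; no tensors are saved.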
Tensor & VariableType::normal_(Tensor & self, double mean, double std, Generator * generator) const { | |
profiler::RecordFunction profiler("normal_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<NormalBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<NormalBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->normal_(self_, mean, std, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor & VariableType::cauchy_(Tensor & self, double median, double sigma, Generator * generator) const { | |
profiler::RecordFunction profiler("cauchy_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<CauchyBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<CauchyBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->cauchy_(self_, median, sigma, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor & VariableType::log_normal_(Tensor & self, double mean, double std, Generator * generator) const { | |
profiler::RecordFunction profiler("log_normal_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LogNormalBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LogNormalBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->log_normal_(self_, mean, std, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor & VariableType::exponential_(Tensor & self, double lambd, Generator * generator) const { | |
profiler::RecordFunction profiler("exponential_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ExponentialBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ExponentialBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->exponential_(self_, lambd, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor VariableType::rand(IntList size, Generator * generator) const { | |
profiler::RecordFunction profiler("rand"); | |
auto result = as_variable(baseType->rand(size, generator)); | |
return result; | |
} | |
Tensor VariableType::randn(IntList size, Generator * generator) const { | |
profiler::RecordFunction profiler("randn"); | |
auto result = as_variable(baseType->randn(size, generator)); | |
return result; | |
} | |
Tensor & VariableType::geometric_(Tensor & self, double p, Generator * generator) const { | |
profiler::RecordFunction profiler("geometric_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<GeometricBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<GeometricBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->geometric_(self_, p, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
return self; | |
} | |
Tensor VariableType::bernoulli(const Tensor & self, Generator * generator) const { | |
profiler::RecordFunction profiler("bernoulli"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<BernoulliBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<BernoulliBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto output = as_variable(baseType->bernoulli(self_, generator)); | |
set_history(output, grad_fn); | |
return output; | |
} | |
Tensor VariableType::_standard_gamma(const Tensor & self, Generator * generator) const { | |
profiler::RecordFunction profiler("_standard_gamma"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<StandardGammaBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<StandardGammaBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto output = as_variable(baseType->_standard_gamma(self_, generator)); | |
set_history(output, grad_fn); | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return output; | |
} | |
Tensor VariableType::tensor(Storage & storage, int64_t storageOffset, IntList size, IntList stride) const { | |
profiler::RecordFunction profiler("tensor"); | |
auto result = as_variable(baseType->tensor(storage, storageOffset, size, stride)); | |
return result; | |
} | |
Tensor VariableType::tensor(IntList size) const { | |
profiler::RecordFunction profiler("tensor"); | |
auto result = as_variable(baseType->tensor(size)); | |
return result; | |
} | |
Tensor VariableType::tensor(IntList size, IntList stride) const { | |
profiler::RecordFunction profiler("tensor"); | |
auto result = as_variable(baseType->tensor(size, stride)); | |
return result; | |
} | |
Tensor VariableType::tensor() const { | |
profiler::RecordFunction profiler("tensor"); | |
auto result = as_variable(baseType->tensor()); | |
return result; | |
} | |
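// Ops with no implemented derivative (sparse_coo_tensor below,
// _copy_ignoring_overlaps_, reshape_, _indices, _values) get an Error node as
// their grad_fn; the forward still runs, and the message only surfaces if a
// backward pass reaches that node.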
Tensor VariableType::sparse_coo_tensor(const Tensor & indices, const Tensor & values) const { | |
profiler::RecordFunction profiler("sparse_coo_tensor"); | |
auto& indices_ = unpack(indices, "indices", 0); | |
auto& values_ = unpack(values, "values", 1); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ indices, values })) { | |
grad_fn = std::make_shared<Error>("the derivative for sparse_coo_tensor is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ indices, values }); | |
} | |
auto result = as_variable(baseType->sparse_coo_tensor(indices_, values_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( indices, values )) { | |
jit::Node *n = jit::tracer::recordTrace( "sparse_coo_tensor", { indices, values }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::alias(const Tensor & self) const { | |
profiler::RecordFunction profiler("alias"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AliasBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AliasBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_view(self, baseType->alias(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "alias", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
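// alias above (and as_strided below) wrap the base result with
// as_view(self, ...) rather than as_variable, so the returned Variable is a
// view sharing storage with self.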
Tensor & VariableType::_copy_ignoring_overlaps_(Tensor & self, const Tensor & src) const { | |
profiler::RecordFunction profiler("_copy_ignoring_overlaps_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& src_ = unpack(src, "src", 1); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self, src })) { | |
grad_fn = std::make_shared<Error>("the derivative for _copy_ignoring_overlaps_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, src }); | |
} | |
baseType->_copy_ignoring_overlaps_(self_, src_); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, src )) { | |
jit::Node *n = jit::tracer::recordTrace( "_copy_ignoring_overlaps", { self, src }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor VariableType::as_strided(const Tensor & self, IntList size, IntList stride, int64_t storage_offset) const { | |
profiler::RecordFunction profiler("as_strided"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AsStridedBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AsStridedBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_geometry = TensorGeometry(self); | |
grad_fn->size = size; | |
grad_fn->stride = stride; | |
grad_fn->storage_offset = storage_offset; | |
} | |
auto result = as_view(self, baseType->as_strided(self_, size, stride, storage_offset)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "as_strided", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("size"), size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("storage_offset"), storage_offset); | |
} | |
return result; | |
} | |
Tensor & VariableType::as_strided_(Tensor & self, IntList size, IntList stride, int64_t storage_offset) const { | |
profiler::RecordFunction profiler("as_strided_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<AsStridedBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AsStridedBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_geometry = TensorGeometry(self); | |
grad_fn->size = size; | |
grad_fn->stride = stride; | |
grad_fn->storage_offset = storage_offset; | |
} | |
baseType->as_strided_(self_, size, stride, storage_offset); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "as_strided", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("size"), size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("storage_offset"), storage_offset); | |
} | |
return self; | |
} | |
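// Unlike the other in-place ops in this file, as_strided_ calls
// ensure_no_aten_scalars on the result and attaches history with set_history
// rather than rebase_history.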
Tensor VariableType::cat(TensorList tensors, int64_t dim) const { | |
profiler::RecordFunction profiler("cat"); | |
auto tensors_ = unpack(tensors, "tensors", 0); | |
std::shared_ptr<CatBackward> grad_fn; | |
if (compute_requires_grad({ tensors })) { | |
grad_fn = std::make_shared<CatBackward>(); | |
grad_fn->next_functions = compute_next_functions({ tensors }); | |
grad_fn->tensors_sizes_dim = to_arg_sizes(tensors, dim); | |
grad_fn->dim = dim; | |
} | |
auto self = as_variable(baseType->cat(tensors_, dim)); | |
set_history(self, grad_fn); | |
if (jit::tracer::isTracing( tensors )) { | |
jit::Node *n = jit::tracer::recordTrace( "cat", flatten( tensors ), { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor & VariableType::reshape_(Tensor & self, IntList size, IntList stride) const { | |
profiler::RecordFunction profiler("reshape_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for reshape_ is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->reshape_(self_, size, stride); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reshape", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("size"), size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
} | |
return self; | |
} | |
Tensor VariableType::_sparse_mask(const Tensor & self, SparseTensor mask) const { | |
profiler::RecordFunction profiler("_sparse_mask"); | |
auto& self_ = unpack(self, "self", 0); | |
auto mask_ = unpack(mask, "mask", 1); | |
std::shared_ptr<SparseMaskBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SparseMaskBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->_sparse_mask(self_, mask_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_sparse_mask", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("mask"), mask); | |
} | |
return result; | |
} | |
Tensor VariableType::_indices(const Tensor & self) const { | |
profiler::RecordFunction profiler("_indices"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for _indices is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->_indices(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_indices", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::_values(const Tensor & self) const { | |
profiler::RecordFunction profiler("_values"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for _values is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->_values(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_values", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
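// The NN losses below come in threes: the public entry point (e.g.
// binary_cross_entropy) defers to the Type:: implementation, the *_forward
// variant does the autograd bookkeeping, and the *_backward variant either
// builds a dedicated double-backward node (KlDivBackwardBackward,
// L1LossBackwardBackward) or an Error node when that second derivative is not
// implemented (binary_cross_entropy_backward).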
Tensor VariableType::binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("binary_cross_entropy"); | |
auto output = Type::binary_cross_entropy(self, target, weight, size_average, reduce); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy", { self, target, weight }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::binary_cross_entropy_forward(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("binary_cross_entropy_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 2); | |
check_no_requires_grad(target, "target"); | |
check_no_requires_grad(weight, "weight"); | |
std::shared_ptr<BinaryCrossEntropyBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<BinaryCrossEntropyBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto output = as_variable(baseType->binary_cross_entropy_forward(self_, target_, weight_, size_average, reduce)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_forward", { self, target, weight }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("binary_cross_entropy_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
auto weight_ = unpack_opt(weight, "weight", 3); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ grad_output, self, target, weight })) { | |
grad_fn = std::make_shared<Error>("the derivative for binary_cross_entropy_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, target, weight }); | |
} | |
auto grad_input = as_variable(baseType->binary_cross_entropy_backward(grad_output_, self_, target_, weight_, size_average, reduce)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "binary_cross_entropy_backward", { grad_output, self, target, weight }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::kl_div(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("kl_div"); | |
auto output = Type::kl_div(self, target, size_average, reduce); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "kl_div", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::kl_div_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("kl_div_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<KlDivBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<KlDivBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto output = as_variable(baseType->kl_div_forward(self_, target_, size_average, reduce)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "kl_div_forward", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::kl_div_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("kl_div_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<KlDivBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<KlDivBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto grad_input = as_variable(baseType->kl_div_backward(grad_output_, self_, target_, size_average, reduce)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "kl_div_backward", { grad_output, self, target }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::l1_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("l1_loss"); | |
auto output = Type::l1_loss(self, target, size_average, reduce); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "l1_loss", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::l1_loss_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("l1_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<L1LossBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<L1LossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto output = as_variable(baseType->l1_loss_forward(self_, target_, size_average, reduce)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "l1_loss_forward", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("l1_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<L1LossBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<L1LossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto grad_input = as_variable(baseType->l1_loss_backward(grad_output_, self_, target_, size_average, reduce)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "l1_loss_backward", { grad_output, self, target }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::mse_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("mse_loss"); | |
auto output = Type::mse_loss(self, target, size_average, reduce); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "mse_loss", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::mse_loss_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("mse_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<MseLossBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MseLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto output = as_variable(baseType->mse_loss_forward(self_, target_, size_average, reduce)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "mse_loss_forward", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("mse_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<MseLossBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<MseLossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto grad_input = as_variable(baseType->mse_loss_backward(grad_output_, self_, target_, size_average, reduce)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "mse_loss_backward", { grad_output, self, target }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
profiler::RecordFunction profiler("multi_margin_loss"); | |
auto output = Type::multi_margin_loss(self, target, p, margin, weight, size_average); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss", { self, target, weight }, { output } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("margin"), margin); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return output; | |
} | |
Tensor VariableType::multi_margin_loss_forward(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
profiler::RecordFunction profiler("multi_margin_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 4); | |
check_no_requires_grad(weight, "weight"); | |
std::shared_ptr<MultiMarginLossBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MultiMarginLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->p = p; | |
grad_fn->margin = margin; | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
} | |
auto output = as_variable(baseType->multi_margin_loss_forward(self_, target_, p, margin, weight_, size_average)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_forward", { self, target, weight }, { output } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("margin"), margin); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return output; | |
} | |
Tensor VariableType::multi_margin_loss_backward(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, bool size_average) const { | |
profiler::RecordFunction profiler("multi_margin_loss_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 4); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self, target, weight })) { | |
grad_fn = std::make_shared<Error>("the derivative for multi_margin_loss_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, target, weight }); | |
} | |
auto grad_input = as_variable(baseType->multi_margin_loss_backward(self_, target_, p, margin, weight_, size_average)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "multi_margin_loss_backward", { self, target, weight }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("p"), p); | |
setattr(n, jit::stringToSymbol("margin"), margin); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::multilabel_margin_loss(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss"); | |
auto output = Type::multilabel_margin_loss(self, target, size_average); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor> VariableType::multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
std::shared_ptr<MultilabelMarginLossBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MultilabelMarginLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
} | |
Tensor output, is_target; | |
std::tie(output, is_target) = as_variable(baseType->multilabel_margin_loss_forward(self_, target_, size_average)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_forward", { self, target }, { output, is_target } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
if (grad_fn) { | |
grad_fn->is_target_ = SavedVariable(is_target, true); | |
} | |
return std::make_tuple(std::move(output), std::move(is_target)); | |
} | |
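// Forwards that return auxiliary tensors (is_target here; total_weight and
// buffer further below) save them into grad_fn only after set_history and
// tracing, passing true as the second SavedVariable argument, whereas inputs
// captured before the base call are saved with false.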
Tensor VariableType::multilabel_margin_loss_backward(const Tensor & self, const Tensor & target, bool size_average, const Tensor & is_target) const { | |
profiler::RecordFunction profiler("multilabel_margin_loss_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto& is_target_ = unpack(is_target, "is_target", 3); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self, target, is_target })) { | |
grad_fn = std::make_shared<Error>("the derivative for multilabel_margin_loss_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self, target, is_target }); | |
} | |
auto grad_input = as_variable(baseType->multilabel_margin_loss_backward(self_, target_, size_average, is_target_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( self, target, is_target )) { | |
jit::Node *n = jit::tracer::recordTrace( "multilabel_margin_loss_backward", { self, target, is_target }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss"); | |
auto output = Type::nll_loss(self, target, weight, size_average, ignore_index, reduce); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss", { self, target, weight }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor> VariableType::nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 2); | |
check_no_requires_grad(weight, "weight"); | |
std::shared_ptr<NllLossBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<NllLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->ignore_index = ignore_index; | |
grad_fn->reduce = reduce; | |
} | |
Tensor output, total_weight; | |
std::tie(output, total_weight) = as_variable(baseType->nll_loss_forward(self_, target_, weight_, size_average, ignore_index, reduce)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss_forward", { self, target, weight }, { output, total_weight } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
if (grad_fn) { | |
grad_fn->total_weight_ = SavedVariable(total_weight, true); | |
} | |
return std::make_tuple(std::move(output), std::move(total_weight)); | |
} | |
Tensor VariableType::nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const { | |
profiler::RecordFunction profiler("nll_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack_long(target, "target", 2); | |
auto weight_ = unpack_opt(weight, "weight", 3); | |
auto& total_weight_ = unpack(total_weight, "total_weight", 7); | |
check_no_requires_grad(weight, "weight"); | |
check_no_requires_grad(total_weight, "total_weight"); | |
std::shared_ptr<NllLossBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<NllLossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->ignore_index = ignore_index; | |
grad_fn->reduce = reduce; | |
} | |
auto grad_input = as_variable(baseType->nll_loss_backward(grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target, weight, total_weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss_backward", { grad_output, self, target, weight, total_weight }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return grad_input; | |
} | |
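// nll_loss: target holds class indices and is unpacked with unpack_long, the
// optional weight with unpack_opt; weight (and total_weight in the backward)
// is guarded by check_no_requires_grad since no gradient is defined for it.
// nll_loss2d below is the spatial variant and follows exactly the same scheme.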
Tensor VariableType::nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss2d"); | |
auto output = Type::nll_loss2d(self, target, weight, size_average, ignore_index, reduce); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss2d", { self, target, weight }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor> VariableType::nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce) const { | |
profiler::RecordFunction profiler("nll_loss2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack_long(target, "target", 1); | |
auto weight_ = unpack_opt(weight, "weight", 2); | |
check_no_requires_grad(weight, "weight"); | |
std::shared_ptr<NllLoss2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<NllLoss2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->ignore_index = ignore_index; | |
grad_fn->reduce = reduce; | |
} | |
Tensor output, total_weight; | |
std::tie(output, total_weight) = as_variable(baseType->nll_loss2d_forward(self_, target_, weight_, size_average, ignore_index, reduce)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_forward", { self, target, weight }, { output, total_weight } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
if (grad_fn) { | |
grad_fn->total_weight_ = SavedVariable(total_weight, true); | |
} | |
return std::make_tuple(std::move(output), std::move(total_weight)); | |
} | |
Tensor VariableType::nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, bool size_average, int64_t ignore_index, bool reduce, const Tensor & total_weight) const { | |
profiler::RecordFunction profiler("nll_loss2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack_long(target, "target", 2); | |
auto weight_ = unpack_opt(weight, "weight", 3); | |
auto& total_weight_ = unpack(total_weight, "total_weight", 7); | |
check_no_requires_grad(weight, "weight"); | |
check_no_requires_grad(total_weight, "total_weight"); | |
std::shared_ptr<NllLoss2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<NllLoss2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->size_average = size_average; | |
grad_fn->ignore_index = ignore_index; | |
grad_fn->reduce = reduce; | |
} | |
auto grad_input = as_variable(baseType->nll_loss2d_backward(grad_output_, self_, target_, weight_, size_average, ignore_index, reduce, total_weight_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target, weight, total_weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nll_loss2d_backward", { grad_output, self, target, weight, total_weight }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("ignore_index"), ignore_index); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::smooth_l1_loss(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("smooth_l1_loss"); | |
auto output = Type::smooth_l1_loss(self, target, size_average, reduce); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::smooth_l1_loss_forward(const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("smooth_l1_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<SmoothL1LossBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SmoothL1LossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto output = as_variable(baseType->smooth_l1_loss_forward(self_, target_, size_average, reduce)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_forward", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return output; | |
} | |
Tensor VariableType::smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, bool size_average, bool reduce) const { | |
profiler::RecordFunction profiler("smooth_l1_loss_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& target_ = unpack(target, "target", 2); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<SmoothL1LossBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<SmoothL1LossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
grad_fn->reduce = reduce; | |
} | |
auto grad_input = as_variable(baseType->smooth_l1_loss_backward(grad_output_, self_, target_, size_average, reduce)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "smooth_l1_loss_backward", { grad_output, self, target }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
setattr(n, jit::stringToSymbol("reduce"), reduce); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::soft_margin_loss(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("soft_margin_loss"); | |
auto output = Type::soft_margin_loss(self, target, size_average); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return output; | |
} | |
Tensor VariableType::soft_margin_loss_forward(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("soft_margin_loss_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<SoftMarginLossBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SoftMarginLossBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
} | |
auto output = as_variable(baseType->soft_margin_loss_forward(self_, target_, size_average)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_forward", { self, target }, { output } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return output; | |
} | |
Tensor VariableType::soft_margin_loss_backward(const Tensor & self, const Tensor & target, bool size_average) const { | |
profiler::RecordFunction profiler("soft_margin_loss_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& target_ = unpack(target, "target", 1); | |
check_no_requires_grad(target, "target"); | |
std::shared_ptr<SoftMarginLossBackwardBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SoftMarginLossBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->target_ = SavedVariable(target, false); | |
grad_fn->size_average = size_average; | |
} | |
auto grad_input = as_variable(baseType->soft_margin_loss_backward(self_, target_, size_average)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( self, target )) { | |
jit::Node *n = jit::tracer::recordTrace( "soft_margin_loss_backward", { self, target }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("size_average"), size_average); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::elu(const Tensor & self, Scalar alpha, Scalar scale) const { | |
profiler::RecordFunction profiler("elu"); | |
auto output = Type::elu(self, alpha, scale); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "elu", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
setattr(n, jit::stringToSymbol("scale"), scale); | |
} | |
return output; | |
} | |
Tensor VariableType::elu_forward(const Tensor & self, Scalar alpha, Scalar scale) const { | |
profiler::RecordFunction profiler("elu_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<EluBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<EluBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->alpha = alpha; | |
grad_fn->scale = scale; | |
} | |
auto output = as_variable(baseType->elu_forward(self_, alpha, scale)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "elu_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
setattr(n, jit::stringToSymbol("scale"), scale); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return output; | |
} | |
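// elu is one of several activations (softmax, log_softmax, softplus below)
// whose backward uses the op's own result: the forward therefore saves
// output_ into grad_fn after the call, and the matching *_backward saves both
// output and grad_output for the double-backward node.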
Tensor VariableType::elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, const Tensor & output) const { | |
profiler::RecordFunction profiler("elu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& output_ = unpack(output, "output", 3); | |
std::shared_ptr<EluBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, output })) { | |
grad_fn = std::make_shared<EluBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, output }); | |
grad_fn->alpha = alpha; | |
grad_fn->scale = scale; | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto grad_input = as_variable(baseType->elu_backward(grad_output_, alpha, scale, output_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "elu_backward", { grad_output, output }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
setattr(n, jit::stringToSymbol("scale"), scale); | |
} | |
return grad_input; | |
} | |
Tensor & VariableType::elu_(Tensor & self, Scalar alpha, Scalar scale) const { | |
profiler::RecordFunction profiler("elu_"); | |
Type::elu_(self, alpha, scale); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "elu", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
setattr(n, jit::stringToSymbol("scale"), scale); | |
} | |
return self; | |
} | |
Tensor & VariableType::elu_forward_(Tensor & self, Scalar alpha, Scalar scale) const { | |
profiler::RecordFunction profiler("elu_forward_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<EluBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<EluBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->alpha = alpha; | |
grad_fn->scale = scale; | |
} | |
baseType->elu_forward_(self_, alpha, scale); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "elu_forward", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("alpha"), alpha); | |
setattr(n, jit::stringToSymbol("scale"), scale); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
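// In-place variants (elu_forward_ above, hardtanh_forward_ and
// leaky_relu_forward_ below) differ from the out-of-place path in three ways:
// check_inplace validates that self may be modified in place, the base op
// mutates self_ directly, and increment_version / rebase_history replace
// set_history, rewriting the variable's autograd history in place.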
Tensor VariableType::glu(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("glu"); | |
auto output = Type::glu(self, dim); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "glu", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return output; | |
} | |
Tensor VariableType::glu_forward(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("glu_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<GluBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<GluBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
} | |
auto output = as_variable(baseType->glu_forward(self_, dim)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "glu_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return output; | |
} | |
Tensor VariableType::glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("glu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<GluBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<GluBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto grad_input = as_variable(baseType->glu_backward(grad_output_, self_, dim)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "glu_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::hardshrink(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("hardshrink"); | |
auto output = Type::hardshrink(self, lambd); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardshrink", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return output; | |
} | |
Tensor VariableType::hardshrink_forward(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("hardshrink_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<HardshrinkBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<HardshrinkBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->lambd = lambd; | |
} | |
auto output = as_variable(baseType->hardshrink_forward(self_, lambd)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardshrink_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return output; | |
} | |
Tensor VariableType::hardshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("hardshrink_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<HardshrinkBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<HardshrinkBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->lambd = lambd; | |
} | |
auto grad_input = as_variable(baseType->hardshrink_backward(grad_output_, self_, lambd)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardshrink_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh"); | |
auto output = Type::hardtanh(self, min_val, max_val); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardtanh", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("min_val"), min_val); | |
setattr(n, jit::stringToSymbol("max_val"), max_val); | |
} | |
return output; | |
} | |
Tensor VariableType::hardtanh_forward(const Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<HardtanhBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<HardtanhBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->min_val = min_val; | |
grad_fn->max_val = max_val; | |
} | |
auto output = as_variable(baseType->hardtanh_forward(self_, min_val, max_val)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardtanh_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("min_val"), min_val); | |
setattr(n, jit::stringToSymbol("max_val"), max_val); | |
} | |
return output; | |
} | |
Tensor VariableType::hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<HardtanhBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<HardtanhBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->min_val = min_val; | |
grad_fn->max_val = max_val; | |
} | |
auto grad_input = as_variable(baseType->hardtanh_backward(grad_output_, self_, min_val, max_val)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardtanh_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("min_val"), min_val); | |
setattr(n, jit::stringToSymbol("max_val"), max_val); | |
} | |
return grad_input; | |
} | |
Tensor & VariableType::hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh_"); | |
Type::hardtanh_(self, min_val, max_val); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardtanh", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("min_val"), min_val); | |
setattr(n, jit::stringToSymbol("max_val"), max_val); | |
} | |
return self; | |
} | |
Tensor & VariableType::hardtanh_forward_(Tensor & self, Scalar min_val, Scalar max_val) const { | |
profiler::RecordFunction profiler("hardtanh_forward_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<HardtanhBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<HardtanhBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->min_val = min_val; | |
grad_fn->max_val = max_val; | |
} | |
baseType->hardtanh_forward_(self_, min_val, max_val); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "hardtanh_forward", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("min_val"), min_val); | |
setattr(n, jit::stringToSymbol("max_val"), max_val); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::leaky_relu(const Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu"); | |
auto output = Type::leaky_relu(self, negative_slope); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "leaky_relu", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
} | |
return output; | |
} | |
Tensor VariableType::leaky_relu_forward(const Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LeakyReluBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LeakyReluBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->negative_slope = negative_slope; | |
} | |
auto output = as_variable(baseType->leaky_relu_forward(self_, negative_slope)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "leaky_relu_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
} | |
return output; | |
} | |
Tensor VariableType::leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<LeakyReluBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<LeakyReluBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->negative_slope = negative_slope; | |
} | |
auto grad_input = as_variable(baseType->leaky_relu_backward(grad_output_, self_, negative_slope)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "leaky_relu_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
} | |
return grad_input; | |
} | |
Tensor & VariableType::leaky_relu_(Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu_"); | |
Type::leaky_relu_(self, negative_slope); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "leaky_relu", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
} | |
return self; | |
} | |
Tensor & VariableType::leaky_relu_forward_(Tensor & self, Scalar negative_slope) const { | |
profiler::RecordFunction profiler("leaky_relu_forward_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<LeakyReluBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LeakyReluBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->negative_slope = negative_slope; | |
} | |
baseType->leaky_relu_forward_(self_, negative_slope); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "leaky_relu_forward", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("negative_slope"), negative_slope); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
Tensor VariableType::log_sigmoid(const Tensor & self) const { | |
profiler::RecordFunction profiler("log_sigmoid"); | |
auto output = Type::log_sigmoid(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_sigmoid", { self }, { output } ); | |
(void)n; | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor> VariableType::log_sigmoid_forward(const Tensor & self) const { | |
profiler::RecordFunction profiler("log_sigmoid_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LogSigmoidBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LogSigmoidBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
Tensor output, buffer; | |
std::tie(output, buffer) = as_variable(baseType->log_sigmoid_forward(self_)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_forward", { self }, { output, buffer } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->buffer_ = SavedVariable(buffer, true); | |
} | |
return std::make_tuple(std::move(output), std::move(buffer)); | |
} | |
Tensor VariableType::log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) const { | |
profiler::RecordFunction profiler("log_sigmoid_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& buffer_ = unpack(buffer, "buffer", 2); | |
check_no_requires_grad(buffer, "buffer"); | |
std::shared_ptr<LogSigmoidBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<LogSigmoidBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->buffer_ = SavedVariable(buffer, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto grad_input = as_variable(baseType->log_sigmoid_backward(grad_output_, self_, buffer_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, buffer )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_sigmoid_backward", { grad_output, self, buffer }, { grad_input } ); | |
(void)n; | |
} | |
return grad_input; | |
} | |
Tensor VariableType::log_softmax(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("log_softmax"); | |
auto output = Type::log_softmax(self, dim); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_softmax", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return output; | |
} | |
Tensor VariableType::log_softmax_forward(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("log_softmax_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<LogSoftmaxBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<LogSoftmaxBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
} | |
auto output = as_variable(baseType->log_softmax_forward(self_, dim)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_softmax_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return output; | |
} | |
Tensor VariableType::log_softmax_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const { | |
profiler::RecordFunction profiler("log_softmax_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& output_ = unpack(output, "output", 3); | |
std::shared_ptr<LogSoftmaxBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<LogSoftmaxBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->dim = dim; | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto grad_input = as_variable(baseType->log_softmax_backward(grad_output_, self_, dim, output_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "log_softmax_backward", { grad_output, self, output }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::prelu(const Tensor & self, const Tensor & weight) const { | |
profiler::RecordFunction profiler("prelu"); | |
auto output = Type::prelu(self, weight); | |
if (jit::tracer::isTracing( self, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "prelu", { self, weight }, { output } ); | |
(void)n; | |
} | |
return output; | |
} | |
Tensor VariableType::prelu_forward(const Tensor & self, const Tensor & weight) const { | |
profiler::RecordFunction profiler("prelu_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
std::shared_ptr<PreluBackward> grad_fn; | |
if (compute_requires_grad({ self, weight })) { | |
grad_fn = std::make_shared<PreluBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
} | |
auto output = as_variable(baseType->prelu_forward(self_, weight_)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "prelu_forward", { self, weight }, { output } ); | |
(void)n; | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor> VariableType::prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, std::array<bool,2> output_mask) const { | |
profiler::RecordFunction profiler("prelu_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
std::shared_ptr<PreluBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self, weight })) { | |
grad_fn = std::make_shared<PreluBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
} | |
Tensor grad_input, grad_weight; | |
std::tie(grad_input, grad_weight) = as_variable(baseType->prelu_backward(grad_output_, self_, weight_, output_mask)); | |
set_history({ grad_input, grad_weight }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "prelu_backward", { grad_output, self, weight }, { grad_input, grad_weight } ); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight)); | |
} | |
Tensor VariableType::rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_with_noise"); | |
auto output = Type::rrelu_with_noise(self, noise, lower, upper, training, generator); | |
return output; | |
} | |
Tensor VariableType::rrelu_with_noise_forward(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_with_noise_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& noise_ = unpack(noise, "noise", 1); | |
check_no_requires_grad(noise, "noise"); | |
std::shared_ptr<RreluWithNoiseBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RreluWithNoiseBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->noise_ = SavedVariable(noise, false); | |
grad_fn->lower = lower; | |
grad_fn->upper = upper; | |
grad_fn->training = training; | |
} | |
auto output = as_variable(baseType->rrelu_with_noise_forward(self_, noise_, lower, upper, training, generator)); | |
set_history(output, grad_fn); | |
return output; | |
} | |
Tensor VariableType::rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) const { | |
profiler::RecordFunction profiler("rrelu_with_noise_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& noise_ = unpack(noise, "noise", 2); | |
check_no_requires_grad(noise, "noise"); | |
std::shared_ptr<RreluWithNoiseBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<RreluWithNoiseBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->noise_ = SavedVariable(noise, false); | |
grad_fn->lower = lower; | |
grad_fn->upper = upper; | |
grad_fn->training = training; | |
} | |
auto grad_input = as_variable(baseType->rrelu_with_noise_backward(grad_output_, self_, noise_, lower, upper, training)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, noise )) { | |
jit::Node *n = jit::tracer::recordTrace( "rrelu_with_noise_backward", { grad_output, self, noise }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("lower"), lower); | |
setattr(n, jit::stringToSymbol("upper"), upper); | |
setattr(n, jit::stringToSymbol("training"), training); | |
} | |
return grad_input; | |
} | |
Tensor & VariableType::rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_with_noise_"); | |
Type::rrelu_with_noise_(self, noise, lower, upper, training, generator); | |
return self; | |
} | |
Tensor & VariableType::rrelu_with_noise_forward_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_with_noise_forward_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& noise_ = unpack(noise, "noise", 1); | |
check_inplace(self); | |
check_no_requires_grad(noise, "noise"); | |
std::shared_ptr<RreluWithNoiseBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<RreluWithNoiseBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->noise_ = SavedVariable(noise, false); | |
grad_fn->lower = lower; | |
grad_fn->upper = upper; | |
grad_fn->training = training; | |
} | |
baseType->rrelu_with_noise_forward_(self_, noise_, lower, upper, training, generator); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
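// Note that the rrelu_with_noise entry points and forwards above carry no
// jit::tracer block, presumably because their result depends on the passed-in
// Generator; only rrelu_with_noise_backward, which is deterministic given its
// inputs, records a trace.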
Tensor VariableType::softmax(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("softmax"); | |
auto output = Type::softmax(self, dim); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softmax", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return output; | |
} | |
Tensor VariableType::softmax_forward(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("softmax_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SoftmaxBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SoftmaxBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
} | |
auto output = as_variable(baseType->softmax_forward(self_, dim)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softmax_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return output; | |
} | |
Tensor VariableType::softmax_backward(const Tensor & grad_output, const Tensor & self, int64_t dim, const Tensor & output) const { | |
profiler::RecordFunction profiler("softmax_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& output_ = unpack(output, "output", 3); | |
std::shared_ptr<SoftmaxBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<SoftmaxBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->dim = dim; | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto grad_input = as_variable(baseType->softmax_backward(grad_output_, self_, dim, output_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "softmax_backward", { grad_output, self, output }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return grad_input; | |
} | |
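// softplus: beta and threshold are stored as Scalar attributes on both the
// autograd node and the traced node, and the forward result is saved for use in
// the backward pass.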
Tensor VariableType::softplus(const Tensor & self, Scalar beta, Scalar threshold) const { | |
profiler::RecordFunction profiler("softplus"); | |
auto output = Type::softplus(self, beta, threshold); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softplus", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
} | |
return output; | |
} | |
Tensor VariableType::softplus_forward(const Tensor & self, Scalar beta, Scalar threshold) const { | |
profiler::RecordFunction profiler("softplus_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SoftplusBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SoftplusBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->beta = beta; | |
grad_fn->threshold = threshold; | |
} | |
auto output = as_variable(baseType->softplus_forward(self_, beta, threshold)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softplus_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(output, true); | |
} | |
return output; | |
} | |
Tensor VariableType::softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) const { | |
profiler::RecordFunction profiler("softplus_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& output_ = unpack(output, "output", 4); | |
std::shared_ptr<SoftplusBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<SoftplusBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->beta = beta; | |
grad_fn->threshold = threshold; | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto grad_input = as_variable(baseType->softplus_backward(grad_output_, self_, beta, threshold, output_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "softplus_backward", { grad_output, self, output }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("beta"), beta); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
} | |
return grad_input; | |
} | |
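// softshrink: the backward only needs the original input and lambd, so no
// forward output is saved.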
Tensor VariableType::softshrink(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("softshrink"); | |
auto output = Type::softshrink(self, lambd); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softshrink", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return output; | |
} | |
Tensor VariableType::softshrink_forward(const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("softshrink_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SoftshrinkBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SoftshrinkBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->lambd = lambd; | |
} | |
auto output = as_variable(baseType->softshrink_forward(self_, lambd)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softshrink_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return output; | |
} | |
Tensor VariableType::softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) const { | |
profiler::RecordFunction profiler("softshrink_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<SoftshrinkBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<SoftshrinkBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->lambd = lambd; | |
} | |
auto grad_input = as_variable(baseType->softshrink_backward(grad_output_, self_, lambd)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "softshrink_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("lambd"), lambd); | |
} | |
return grad_input; | |
} | |
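// threshold: ThresholdBackward0 (out-of-place) saves the input, whereas
// ThresholdBackward1 (in-place, below) saves the result instead, presumably
// because the input is overwritten by the in-place update.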
Tensor VariableType::threshold(const Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold"); | |
auto output = Type::threshold(self, threshold, value); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "threshold", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return output; | |
} | |
Tensor VariableType::threshold_forward(const Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ThresholdBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ThresholdBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->threshold = threshold; | |
grad_fn->value = value; | |
} | |
auto output = as_variable(baseType->threshold_forward(self_, threshold, value)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "threshold_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return output; | |
} | |
Tensor VariableType::threshold_backward(const Tensor & grad_output, const Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ThresholdBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<ThresholdBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->threshold = threshold; | |
grad_fn->value = value; | |
} | |
auto grad_input = as_variable(baseType->threshold_backward(grad_output_, self_, threshold, value)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "threshold_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return grad_input; | |
} | |
Tensor & VariableType::threshold_(Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold_"); | |
Type::threshold_(self, threshold, value); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "threshold", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
return self; | |
} | |
Tensor & VariableType::threshold_forward_(Tensor & self, Scalar threshold, Scalar value) const { | |
profiler::RecordFunction profiler("threshold_forward_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<ThresholdBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ThresholdBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->threshold = threshold; | |
grad_fn->value = value; | |
} | |
baseType->threshold_forward_(self_, threshold, value); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "threshold_forward", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("threshold"), threshold); | |
setattr(n, jit::stringToSymbol("value"), value); | |
} | |
if (grad_fn) { | |
grad_fn->output_ = SavedVariable(self, true); | |
} | |
return self; | |
} | |
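// Adaptive average pooling: the backward node saves only the input; the
// double-backward node keeps grad_output plus self_info, and no attributes are
// recorded on the traced backward node.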
Tensor VariableType::adaptive_avg_pool2d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool2d"); | |
auto output = Type::adaptive_avg_pool2d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::adaptive_avg_pool2d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AdaptiveAvgPool2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AdaptiveAvgPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto output = as_variable(baseType->adaptive_avg_pool2d_forward(self_, output_size)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<AdaptiveAvgPool2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<AdaptiveAvgPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->adaptive_avg_pool2d_backward(grad_output_, self_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool2d_backward", { grad_output, self }, { grad_input } ); | |
(void)n; | |
} | |
return grad_input; | |
} | |
Tensor VariableType::adaptive_avg_pool3d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d"); | |
auto output = Type::adaptive_avg_pool3d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::adaptive_avg_pool3d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AdaptiveAvgPool3DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AdaptiveAvgPool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
auto output = as_variable(baseType->adaptive_avg_pool3d_forward(self_, output_size)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<AdaptiveAvgPool3DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<AdaptiveAvgPool3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->adaptive_avg_pool3d_backward(grad_output_, self_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool3d_backward", { grad_output, self }, { grad_input } ); | |
(void)n; | |
} | |
return grad_input; | |
} | |
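// Adaptive max pooling returns (output, indices); the indices are saved on the
// grad_fn only after the trace is recorded, and are unpacked as a long tensor
// in the backward functions.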
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool2d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool2d"); | |
Tensor output, indices; | |
std::tie(output, indices) = Type::adaptive_max_pool2d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d", { self }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool2d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AdaptiveMaxPool2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AdaptiveMaxPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
Tensor output, indices; | |
std::tie(output, indices) = as_variable(baseType->adaptive_max_pool2d_forward(self_, output_size)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_forward", { self }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
Tensor VariableType::adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const { | |
profiler::RecordFunction profiler("adaptive_max_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 2); | |
std::shared_ptr<AdaptiveMaxPool2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<AdaptiveMaxPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->adaptive_max_pool2d_backward(grad_output_, self_, indices_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool2d_backward", { grad_output, self, indices }, { grad_input } ); | |
(void)n; | |
} | |
return grad_input; | |
} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool3d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool3d"); | |
Tensor output, indices; | |
std::tie(output, indices) = Type::adaptive_max_pool3d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d", { self }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool3d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AdaptiveMaxPool3DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AdaptiveMaxPool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
Tensor output, indices; | |
std::tie(output, indices) = as_variable(baseType->adaptive_max_pool3d_forward(self_, output_size)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_forward", { self }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
Tensor VariableType::adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) const { | |
profiler::RecordFunction profiler("adaptive_max_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 2); | |
std::shared_ptr<AdaptiveMaxPool3DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<AdaptiveMaxPool3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->adaptive_max_pool3d_backward(grad_output_, self_, indices_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool3d_backward", { grad_output, self, indices }, { grad_input } ); | |
(void)n; | |
} | |
return grad_input; | |
} | |
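// avg_pool2d/3d: every pooling parameter (kernel_size, stride, padding,
// ceil_mode, count_include_pad) is copied onto both the autograd node and the
// traced node, for the forward as well as the backward.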
Tensor VariableType::avg_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool2d"); | |
auto output = Type::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool2d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return output; | |
} | |
Tensor VariableType::avg_pool2d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AvgPool2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AvgPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->ceil_mode = ceil_mode; | |
grad_fn->count_include_pad = count_include_pad; | |
} | |
auto output = as_variable(baseType->avg_pool2d_forward(self_, kernel_size, stride, padding, ceil_mode, count_include_pad)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return output; | |
} | |
Tensor VariableType::avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<AvgPool2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<AvgPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->ceil_mode = ceil_mode; | |
grad_fn->count_include_pad = count_include_pad; | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->avg_pool2d_backward(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool2d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::avg_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d"); | |
auto output = Type::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool3d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return output; | |
} | |
Tensor VariableType::avg_pool3d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<AvgPool3DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<AvgPool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->ceil_mode = ceil_mode; | |
grad_fn->count_include_pad = count_include_pad; | |
} | |
auto output = as_variable(baseType->avg_pool3d_forward(self_, kernel_size, stride, padding, ceil_mode, count_include_pad)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return output; | |
} | |
Tensor VariableType::avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) const { | |
profiler::RecordFunction profiler("avg_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<AvgPool3DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<AvgPool3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->ceil_mode = ceil_mode; | |
grad_fn->count_include_pad = count_include_pad; | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->avg_pool3d_backward(grad_output_, self_, kernel_size, stride, padding, ceil_mode, count_include_pad)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "avg_pool3d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
setattr(n, jit::stringToSymbol("count_include_pad"), count_include_pad); | |
} | |
return grad_input; | |
} | |
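// Fractional max pooling takes an additional random_samples tensor: it is part
// of the trace inputs but must not require grad (check_no_requires_grad).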
std::tuple<Tensor,Tensor> VariableType::fractional_max_pool2d(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) const { | |
profiler::RecordFunction profiler("fractional_max_pool2d"); | |
Tensor output, indices; | |
std::tie(output, indices) = Type::fractional_max_pool2d(self, kernel_size, output_size, random_samples); | |
if (jit::tracer::isTracing( self, random_samples )) { | |
jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d", { self, random_samples }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::fractional_max_pool2d_forward(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) const { | |
profiler::RecordFunction profiler("fractional_max_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& random_samples_ = unpack(random_samples, "random_samples", 3); | |
check_no_requires_grad(random_samples, "random_samples"); | |
std::shared_ptr<FractionalMaxPool2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<FractionalMaxPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->output_size = output_size; | |
} | |
Tensor output, indices; | |
std::tie(output, indices) = as_variable(baseType->fractional_max_pool2d_forward(self_, kernel_size, output_size, random_samples_)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, random_samples )) { | |
jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_forward", { self, random_samples }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
Tensor VariableType::fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) const { | |
profiler::RecordFunction profiler("fractional_max_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 4); | |
std::shared_ptr<FractionalMaxPool2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<FractionalMaxPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->fractional_max_pool2d_backward(grad_output_, self_, kernel_size, output_size, indices_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "fractional_max_pool2d_backward", { grad_output, self, indices }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return grad_input; | |
} | |
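// max_pool2d/3d: as with adaptive max pooling, the indices output is saved for
// the backward pass; the 2d double-backward node keeps the saved indices and
// self_info rather than re-saving the input.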
std::tuple<Tensor,Tensor> VariableType::max_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool2d"); | |
Tensor output, indices; | |
std::tie(output, indices) = Type::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool2d", { self }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::max_pool2d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MaxPool2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MaxPool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
grad_fn->ceil_mode = ceil_mode; | |
} | |
Tensor output, indices; | |
std::tie(output, indices) = as_variable(baseType->max_pool2d_forward(self_, kernel_size, stride, padding, dilation, ceil_mode)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool2d_forward", { self }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
Tensor VariableType::max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const { | |
profiler::RecordFunction profiler("max_pool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 7); | |
std::shared_ptr<MaxPool2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<MaxPool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->max_pool2d_backward(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool2d_backward", { grad_output, self, indices }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
return grad_input; | |
} | |
std::tuple<Tensor,Tensor> VariableType::max_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool3d"); | |
Tensor output, indices; | |
std::tie(output, indices) = Type::max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool3d", { self }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
std::tuple<Tensor,Tensor> VariableType::max_pool3d_forward(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<MaxPool3DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MaxPool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
grad_fn->ceil_mode = ceil_mode; | |
} | |
Tensor output, indices; | |
std::tie(output, indices) = as_variable(baseType->max_pool3d_forward(self_, kernel_size, stride, padding, dilation, ceil_mode)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool3d_forward", { self }, { output, indices } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
if (grad_fn) { | |
grad_fn->indices_ = SavedVariable(indices, true); | |
} | |
return std::make_tuple(std::move(output), std::move(indices)); | |
} | |
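// The double backward for max_pool3d is not implemented: if any input requires
// grad, an Error node is installed as grad_fn, so differentiating through this
// backward raises at runtime with the message below.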
Tensor VariableType::max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) const { | |
profiler::RecordFunction profiler("max_pool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 7); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ grad_output, self, indices })) { | |
grad_fn = std::make_shared<Error>("the derivative for max_pool3d_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, indices }); | |
} | |
auto grad_input = as_variable(baseType->max_pool3d_backward(grad_output_, self_, kernel_size, stride, padding, dilation, ceil_mode, indices_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool3d_backward", { grad_output, self, indices }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
return grad_input; | |
} | |
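// max_unpool2d/3d: here the indices are an explicit input (unpacked as a long
// tensor) rather than a saved forward output; max_unpool3d_backward reuses the
// Error-node pattern above for its unimplemented double backward.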
Tensor VariableType::max_unpool2d(const Tensor & self, const Tensor & indices, IntList output_size) const { | |
profiler::RecordFunction profiler("max_unpool2d"); | |
auto output = Type::max_unpool2d(self, indices, output_size); | |
if (jit::tracer::isTracing( self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool2d", { self, indices }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::max_unpool2d_forward(const Tensor & self, const Tensor & indices, IntList output_size) const { | |
profiler::RecordFunction profiler("max_unpool2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& indices_ = unpack_long(indices, "indices", 1); | |
std::shared_ptr<MaxUnpool2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MaxUnpool2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->output_size = output_size; | |
} | |
auto output = as_variable(baseType->max_unpool2d_forward(self_, indices_, output_size)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_forward", { self, indices }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) const { | |
profiler::RecordFunction profiler("max_unpool2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 2); | |
std::shared_ptr<MaxUnpool2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<MaxUnpool2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->output_size = output_size; | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->max_unpool2d_backward(grad_output_, self_, indices_, output_size)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool2d_backward", { grad_output, self, indices }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::max_unpool3d(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("max_unpool3d"); | |
auto output = Type::max_unpool3d(self, indices, output_size, stride, padding); | |
if (jit::tracer::isTracing( self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool3d", { self, indices }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::max_unpool3d_forward(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("max_unpool3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& indices_ = unpack_long(indices, "indices", 1); | |
std::shared_ptr<MaxUnpool3DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<MaxUnpool3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->output_size = output_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
} | |
auto output = as_variable(baseType->max_unpool3d_forward(self_, indices_, output_size, stride, padding)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_forward", { self, indices }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("max_unpool3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& indices_ = unpack_long(indices, "indices", 2); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ grad_output, self, indices })) { | |
grad_fn = std::make_shared<Error>("the derivative for max_unpool3d_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, indices }); | |
} | |
auto grad_input = as_variable(baseType->max_unpool3d_backward(grad_output_, self_, indices_, output_size, stride, padding)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_unpool3d_backward", { grad_output, self, indices }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return grad_input; | |
} | |
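// Padding functions (reflection_pad1d/2d, replication_pad1d/2d/3d): the padding
// list is the only attribute; the double-backward nodes keep just padding and
// self_info.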
Tensor VariableType::reflection_pad1d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad1d"); | |
auto output = Type::reflection_pad1d(self, padding); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::reflection_pad1d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad1d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReflectionPad1DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ReflectionPad1DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto output = as_variable(baseType->reflection_pad1d_forward(self_, padding)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReflectionPad1DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<ReflectionPad1DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->reflection_pad1d_backward(grad_output_, self_, padding)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad1d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::reflection_pad2d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d"); | |
auto output = Type::reflection_pad2d(self, padding); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::reflection_pad2d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReflectionPad2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ReflectionPad2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto output = as_variable(baseType->reflection_pad2d_forward(self_, padding)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("reflection_pad2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReflectionPad2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<ReflectionPad2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->reflection_pad2d_backward(grad_output_, self_, padding)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "reflection_pad2d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::replication_pad1d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad1d"); | |
auto output = Type::replication_pad1d(self, padding); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad1d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::replication_pad1d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad1d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReplicationPad1DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ReplicationPad1DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto output = as_variable(baseType->replication_pad1d_forward(self_, padding)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReplicationPad1DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<ReplicationPad1DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->replication_pad1d_backward(grad_output_, self_, padding)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad1d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::replication_pad2d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad2d"); | |
auto output = Type::replication_pad2d(self, padding); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad2d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::replication_pad2d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReplicationPad2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ReplicationPad2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto output = as_variable(baseType->replication_pad2d_forward(self_, padding)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReplicationPad2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<ReplicationPad2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->replication_pad2d_backward(grad_output_, self_, padding)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad2d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::replication_pad3d(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad3d"); | |
auto output = Type::replication_pad3d(self, padding); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad3d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::replication_pad3d_forward(const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ReplicationPad3DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ReplicationPad3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->padding = padding; | |
} | |
auto output = as_variable(baseType->replication_pad3d_forward(self_, padding)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
Tensor VariableType::replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) const { | |
profiler::RecordFunction profiler("replication_pad3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<ReplicationPad3DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<ReplicationPad3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->padding = padding; | |
grad_fn->self_info = self; | |
} | |
auto grad_input = as_variable(baseType->replication_pad3d_backward(grad_output_, self_, padding)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "replication_pad3d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return grad_input; | |
} | |
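// Upsampling: the forward grad_fn records self.sizes() rather than saving the
// input tensor, apparently because only the original shape is needed by the
// backward; the backward takes input_size explicitly and records it as a trace
// attribute.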
Tensor VariableType::upsample_linear1d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_linear1d"); | |
auto output = Type::upsample_linear1d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_linear1d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_linear1d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleLinear1DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UpsampleLinear1DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->output_size = output_size; | |
} | |
auto output = as_variable(baseType->upsample_linear1d_forward(self_, output_size)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_linear1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
profiler::RecordFunction profiler("upsample_linear1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
std::shared_ptr<UpsampleLinear1DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output })) { | |
grad_fn = std::make_shared<UpsampleLinear1DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output }); | |
grad_fn->output_size = output_size; | |
} | |
auto grad_input = as_variable(baseType->upsample_linear1d_backward(grad_output_, output_size, input_size)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_linear1d_backward", { grad_output }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("input_size"), input_size); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::upsample_bilinear2d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_bilinear2d"); | |
auto output = Type::upsample_bilinear2d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_bilinear2d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_bilinear2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleBilinear2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UpsampleBilinear2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->output_size = output_size; | |
} | |
auto output = as_variable(baseType->upsample_bilinear2d_forward(self_, output_size)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_bilinear2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
profiler::RecordFunction profiler("upsample_bilinear2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
std::shared_ptr<UpsampleBilinear2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output })) { | |
grad_fn = std::make_shared<UpsampleBilinear2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output }); | |
grad_fn->output_size = output_size; | |
} | |
auto grad_input = as_variable(baseType->upsample_bilinear2d_backward(grad_output_, output_size, input_size)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_bilinear2d_backward", { grad_output }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("input_size"), input_size); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::upsample_trilinear3d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_trilinear3d"); | |
auto output = Type::upsample_trilinear3d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_trilinear3d_forward(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("upsample_trilinear3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleTrilinear3DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UpsampleTrilinear3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->output_size = output_size; | |
} | |
auto output = as_variable(baseType->upsample_trilinear3d_forward(self_, output_size)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_trilinear3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) const { | |
profiler::RecordFunction profiler("upsample_trilinear3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
std::shared_ptr<UpsampleTrilinear3DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output })) { | |
grad_fn = std::make_shared<UpsampleTrilinear3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output }); | |
grad_fn->output_size = output_size; | |
} | |
auto grad_input = as_variable(baseType->upsample_trilinear3d_backward(grad_output_, output_size, input_size)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_trilinear3d_backward", { grad_output }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
setattr(n, jit::stringToSymbol("input_size"), input_size); | |
} | |
return grad_input; | |
} | |
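// The nearest-neighbour variants below differ from the linear ones in two ways:
// they are parameterised by an integer scale_factor (recorded through the int64_t
// setattr overload) rather than an output_size list, and their *_forward
// functions keep a SavedVariable of self, since the corresponding *_backward
// kernel takes the original input as an argument.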
Tensor VariableType::upsample_nearest1d(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest1d"); | |
auto output = Type::upsample_nearest1d(self, scale_factor); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_nearest1d_forward(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest1d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleNearest1DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UpsampleNearest1DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto output = as_variable(baseType->upsample_nearest1d_forward(self_, scale_factor)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_nearest1d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest1d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<UpsampleNearest1DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<UpsampleNearest1DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto grad_input = as_variable(baseType->upsample_nearest1d_backward(grad_output_, self_, scale_factor)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest1d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::upsample_nearest2d(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest2d"); | |
auto output = Type::upsample_nearest2d(self, scale_factor); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_nearest2d_forward(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleNearest2DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UpsampleNearest2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto output = as_variable(baseType->upsample_nearest2d_forward(self_, scale_factor)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_nearest2d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<UpsampleNearest2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<UpsampleNearest2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto grad_input = as_variable(baseType->upsample_nearest2d_backward(grad_output_, self_, scale_factor)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest2d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return grad_input; | |
} | |
Tensor VariableType::upsample_nearest3d(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest3d"); | |
auto output = Type::upsample_nearest3d(self, scale_factor); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_nearest3d_forward(const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UpsampleNearest3DBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UpsampleNearest3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto output = as_variable(baseType->upsample_nearest3d_forward(self_, scale_factor)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_forward", { self }, { output } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return output; | |
} | |
Tensor VariableType::upsample_nearest3d_backward(const Tensor & grad_output, const Tensor & self, int64_t scale_factor) const { | |
profiler::RecordFunction profiler("upsample_nearest3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
std::shared_ptr<UpsampleNearest3DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self })) { | |
grad_fn = std::make_shared<UpsampleNearest3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self }); | |
grad_fn->scale_factor = scale_factor; | |
} | |
auto grad_input = as_variable(baseType->upsample_nearest3d_backward(grad_output_, self_, scale_factor)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "upsample_nearest3d_backward", { grad_output, self }, { grad_input } ); | |
setattr(n, jit::stringToSymbol("scale_factor"), scale_factor); | |
} | |
return grad_input; | |
} | |
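// For _sigmoid and _tanh no derivative formula is registered for the *_forward
// variants: when the input requires grad, grad_fn is an Error node carrying the
// "derivative ... is not implemented" message, which fails if backward ever
// reaches it. The *_backward variants do get proper SigmoidBackwardBackward /
// TanhBackwardBackward nodes, each saving both output and grad_output for double
// backward.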
Tensor VariableType::_sigmoid(const Tensor & self) const { | |
profiler::RecordFunction profiler("_sigmoid"); | |
auto output = Type::_sigmoid(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_sigmoid", { self }, { output } ); | |
(void)n; | |
} | |
return output; | |
} | |
Tensor VariableType::_sigmoid_forward(const Tensor & self) const { | |
profiler::RecordFunction profiler("_sigmoid_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for _sigmoid_forward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto output = as_variable(baseType->_sigmoid_forward(self_)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_sigmoid_forward", { self }, { output } ); | |
(void)n; | |
} | |
return output; | |
} | |
Tensor VariableType::_sigmoid_backward(const Tensor & grad_output, const Tensor & output) const { | |
profiler::RecordFunction profiler("_sigmoid_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& output_ = unpack(output, "output", 1); | |
std::shared_ptr<SigmoidBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, output })) { | |
grad_fn = std::make_shared<SigmoidBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, output }); | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto grad_input = as_variable(baseType->_sigmoid_backward(grad_output_, output_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "_sigmoid_backward", { grad_output, output }, { grad_input } ); | |
(void)n; | |
} | |
return grad_input; | |
} | |
Tensor VariableType::_tanh(const Tensor & self) const { | |
profiler::RecordFunction profiler("_tanh"); | |
auto output = Type::_tanh(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_tanh", { self }, { output } ); | |
(void)n; | |
} | |
return output; | |
} | |
Tensor VariableType::_tanh_forward(const Tensor & self) const { | |
profiler::RecordFunction profiler("_tanh_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<Error>("the derivative for _tanh_forward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto output = as_variable(baseType->_tanh_forward(self_)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_tanh_forward", { self }, { output } ); | |
(void)n; | |
} | |
return output; | |
} | |
Tensor VariableType::_tanh_backward(const Tensor & grad_output, const Tensor & output) const { | |
profiler::RecordFunction profiler("_tanh_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& output_ = unpack(output, "output", 1); | |
std::shared_ptr<TanhBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, output })) { | |
grad_fn = std::make_shared<TanhBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, output }); | |
grad_fn->output_ = SavedVariable(output, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
} | |
auto grad_input = as_variable(baseType->_tanh_backward(grad_output_, output_)); | |
set_history(grad_input, grad_fn); | |
if (jit::tracer::isTracing( grad_output, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "_tanh_backward", { grad_output, output }, { grad_input } ); | |
(void)n; | |
} | |
return grad_input; | |
} | |
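// thnn_batch_norm_forward below returns three tensors (output, save_mean,
// save_std). The running statistics are rejected as differentiable inputs via
// check_no_requires_grad, and the freshly produced save_mean / save_std are only
// attached to grad_fn after the call, with the SavedVariable constructor's second
// argument set to true (it is false for inputs throughout this file), presumably
// marking them as outputs of the node they are saved on.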
Tensor VariableType::thnn_batch_norm(const Tensor & self, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) const { | |
profiler::RecordFunction profiler("thnn_batch_norm"); | |
auto output = Type::thnn_batch_norm(self, weight, bias, running_mean, running_var, training, momentum, eps); | |
if (jit::tracer::isTracing( self, weight, bias, running_mean, running_var )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm", { self, weight, bias, running_mean, running_var }, { output } ); | |
setattr(n, jit::stringToSymbol("training"), training); | |
setattr(n, jit::stringToSymbol("momentum"), momentum); | |
setattr(n, jit::stringToSymbol("eps"), eps); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_batch_norm_forward(const Tensor & self, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) const { | |
profiler::RecordFunction profiler("thnn_batch_norm_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto weight_ = unpack_opt(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
auto& running_mean_ = unpack(running_mean, "running_mean", 3); | |
auto& running_var_ = unpack(running_var, "running_var", 4); | |
check_no_requires_grad(running_mean, "running_mean"); | |
check_no_requires_grad(running_var, "running_var"); | |
std::shared_ptr<ThnnBatchNormBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<ThnnBatchNormBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->running_mean_ = SavedVariable(running_mean, false); | |
grad_fn->running_var_ = SavedVariable(running_var, false); | |
grad_fn->training = training; | |
grad_fn->eps = eps; | |
} | |
Tensor output, save_mean, save_std; | |
std::tie(output, save_mean, save_std) = as_variable(baseType->thnn_batch_norm_forward(self_, weight_, bias_, running_mean_, running_var_, training, momentum, eps)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias, running_mean, running_var )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_forward", { self, weight, bias, running_mean, running_var }, { output, save_mean, save_std } ); | |
setattr(n, jit::stringToSymbol("training"), training); | |
setattr(n, jit::stringToSymbol("momentum"), momentum); | |
setattr(n, jit::stringToSymbol("eps"), eps); | |
} | |
if (grad_fn) { | |
grad_fn->save_mean_ = SavedVariable(save_mean, true); | |
grad_fn->save_std_ = SavedVariable(save_std, true); | |
} | |
return std::make_tuple(std::move(output), std::move(save_mean), std::move(save_std)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_batch_norm_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, bool training, double eps, const Tensor & save_mean, const Tensor & save_std, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_batch_norm_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto weight_ = unpack_opt(weight, "weight", 2); | |
auto& running_mean_ = unpack(running_mean, "running_mean", 3); | |
auto& running_var_ = unpack(running_var, "running_var", 4); | |
auto& save_mean_ = unpack(save_mean, "save_mean", 7); | |
auto& save_std_ = unpack(save_std, "save_std", 8); | |
check_no_requires_grad(running_mean, "running_mean"); | |
check_no_requires_grad(running_var, "running_var"); | |
std::shared_ptr<ThnnBatchNormBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self, weight, save_mean, save_std })) { | |
grad_fn = std::make_shared<ThnnBatchNormBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight, save_mean, save_std }); | |
grad_fn->save_mean_ = SavedVariable(save_mean, false); | |
grad_fn->save_std_ = SavedVariable(save_std, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->running_mean_ = SavedVariable(running_mean, false); | |
grad_fn->running_var_ = SavedVariable(running_var, false); | |
grad_fn->training = training; | |
grad_fn->eps = eps; | |
} | |
Tensor grad_input, grad_weight, grad_bias; | |
std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_batch_norm_backward(grad_output_, self_, weight_, running_mean_, running_var_, training, eps, save_mean_, save_std_, output_mask)); | |
set_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, running_mean, running_var, save_mean, save_std )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_batch_norm_backward", { grad_output, self, weight, running_mean, running_var, save_mean, save_std }, { grad_input, grad_weight, grad_bias } ); | |
setattr(n, jit::stringToSymbol("training"), training); | |
setattr(n, jit::stringToSymbol("eps"), eps); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
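// The THNN convolution family (thnn_conv_transpose2d/3d, thnn_conv2d/3d,
// thnn_conv_depthwise2d, thnn_conv_dilated2d/3d) repeats one shape: *_forward
// returns workspace buffers alongside the output (columns/ones or
// finput/fgrad_input) which are attached to grad_fn after the call, and
// *_backward takes those buffers back (guarded by check_no_requires_grad),
// accepts an output_mask selecting which of grad_input/grad_weight/grad_bias to
// compute, and records that mask through the std::array<bool, N> setattr
// overload. thnn_conv_depthwise2d is the exception: its forward returns only the
// output and its backward produces just grad_input and grad_weight.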
Tensor VariableType::thnn_conv_transpose2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose2d"); | |
auto output = Type::thnn_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d", { self, weight, bias }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
std::shared_ptr<ThnnConvTranspose2DBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<ThnnConvTranspose2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->dilation = dilation; | |
} | |
Tensor output, columns, ones; | |
std::tie(output, columns, ones) = as_variable(baseType->thnn_conv_transpose2d_forward(self_, weight_, kernel_size, bias_, stride, padding, output_padding, dilation)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_forward", { self, weight, bias }, { output, columns, ones } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
if (grad_fn) { | |
grad_fn->columns_ = SavedVariable(columns, true); | |
grad_fn->ones_ = SavedVariable(ones, true); | |
} | |
return std::make_tuple(std::move(output), std::move(columns), std::move(ones)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& columns_ = unpack(columns, "columns", 8); | |
auto& ones_ = unpack(ones, "ones", 9); | |
check_no_requires_grad(columns, "columns"); | |
check_no_requires_grad(ones, "ones"); | |
std::shared_ptr<ThnnConvTranspose2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self, weight })) { | |
grad_fn = std::make_shared<ThnnConvTranspose2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->dilation = dilation; | |
} | |
Tensor grad_input, grad_weight, grad_bias; | |
std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv_transpose2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, output_padding, dilation, columns_, ones_, output_mask)); | |
set_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, columns, ones )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose2d_backward", { grad_output, self, weight, columns, ones }, { grad_input, grad_weight, grad_bias } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
Tensor VariableType::thnn_conv_transpose3d(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose3d"); | |
auto output = Type::thnn_conv_transpose3d(self, weight, bias, stride, padding, output_padding, dilation); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d", { self, weight, bias }, { output } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
std::shared_ptr<ThnnConvTranspose3DBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<ThnnConvTranspose3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->dilation = dilation; | |
} | |
Tensor output, finput, fgrad_input; | |
std::tie(output, finput, fgrad_input) = as_variable(baseType->thnn_conv_transpose3d_forward(self_, weight_, bias_, stride, padding, output_padding, dilation)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_forward", { self, weight, bias }, { output, finput, fgrad_input } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
if (grad_fn) { | |
grad_fn->finput_ = SavedVariable(finput, true); | |
grad_fn->fgrad_input_ = SavedVariable(fgrad_input, true); | |
} | |
return std::make_tuple(std::move(output), std::move(finput), std::move(fgrad_input)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_transpose3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& finput_ = unpack(finput, "finput", 7); | |
auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 8); | |
check_no_requires_grad(finput, "finput"); | |
check_no_requires_grad(fgrad_input, "fgrad_input"); | |
std::shared_ptr<ThnnConvTranspose3DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self, weight })) { | |
grad_fn = std::make_shared<ThnnConvTranspose3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->dilation = dilation; | |
} | |
Tensor grad_input, grad_weight, grad_bias; | |
std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv_transpose3d_backward(grad_output_, self_, weight_, stride, padding, output_padding, dilation, finput_, fgrad_input_, output_mask)); | |
set_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, finput, fgrad_input )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_transpose3d_backward", { grad_output, self, weight, finput, fgrad_input }, { grad_input, grad_weight, grad_bias } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
Tensor VariableType::thnn_conv2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv2d"); | |
auto output = Type::thnn_conv2d(self, weight, kernel_size, bias, stride, padding); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d", { self, weight, bias }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
std::shared_ptr<ThnnConv2DBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<ThnnConv2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
} | |
Tensor output, finput, fgrad_input; | |
std::tie(output, finput, fgrad_input) = as_variable(baseType->thnn_conv2d_forward(self_, weight_, kernel_size, bias_, stride, padding)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_forward", { self, weight, bias }, { output, finput, fgrad_input } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
if (grad_fn) { | |
grad_fn->finput_ = SavedVariable(finput, true); | |
grad_fn->fgrad_input_ = SavedVariable(fgrad_input, true); | |
} | |
return std::make_tuple(std::move(output), std::move(finput), std::move(fgrad_input)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& finput_ = unpack(finput, "finput", 6); | |
auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 7); | |
check_no_requires_grad(finput, "finput"); | |
check_no_requires_grad(fgrad_input, "fgrad_input"); | |
std::shared_ptr<ThnnConv2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self, weight })) { | |
grad_fn = std::make_shared<ThnnConv2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
} | |
Tensor grad_input, grad_weight, grad_bias; | |
std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_, output_mask)); | |
set_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, finput, fgrad_input )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv2d_backward", { grad_output, self, weight, finput, fgrad_input }, { grad_input, grad_weight, grad_bias } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
Tensor VariableType::thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_depthwise2d"); | |
auto output = Type::thnn_conv_depthwise2d(self, weight, kernel_size, bias, stride, padding, dilation); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d", { self, weight, bias }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return output; | |
} | |
Tensor VariableType::thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_depthwise2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
std::shared_ptr<ThnnConvDepthwise2DBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<ThnnConvDepthwise2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
auto output = as_variable(baseType->thnn_conv_depthwise2d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_forward", { self, weight, bias }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor> VariableType::thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, std::array<bool,2> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_depthwise2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
std::shared_ptr<ThnnConvDepthwise2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self, weight })) { | |
grad_fn = std::make_shared<ThnnConvDepthwise2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
Tensor grad_input, grad_weight; | |
std::tie(grad_input, grad_weight) = as_variable(baseType->thnn_conv_depthwise2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, output_mask)); | |
set_history({ grad_input, grad_weight }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_depthwise2d_backward", { grad_output, self, weight }, { grad_input, grad_weight } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight)); | |
} | |
Tensor VariableType::thnn_conv3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv3d"); | |
auto output = Type::thnn_conv3d(self, weight, kernel_size, bias, stride, padding); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d", { self, weight, bias }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) const { | |
profiler::RecordFunction profiler("thnn_conv3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
std::shared_ptr<ThnnConv3DBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<ThnnConv3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
} | |
Tensor output, finput, fgrad_input; | |
std::tie(output, finput, fgrad_input) = as_variable(baseType->thnn_conv3d_forward(self_, weight_, kernel_size, bias_, stride, padding)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_forward", { self, weight, bias }, { output, finput, fgrad_input } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
} | |
if (grad_fn) { | |
grad_fn->finput_ = SavedVariable(finput, true); | |
grad_fn->fgrad_input_ = SavedVariable(fgrad_input, true); | |
} | |
return std::make_tuple(std::move(output), std::move(finput), std::move(fgrad_input)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& finput_ = unpack(finput, "finput", 6); | |
auto& fgrad_input_ = unpack(fgrad_input, "fgrad_input", 7); | |
check_no_requires_grad(finput, "finput"); | |
check_no_requires_grad(fgrad_input, "fgrad_input"); | |
std::shared_ptr<ThnnConv3DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self, weight })) { | |
grad_fn = std::make_shared<ThnnConv3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
} | |
Tensor grad_input, grad_weight, grad_bias; | |
std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv3d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, finput_, fgrad_input_, output_mask)); | |
set_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, finput, fgrad_input )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv3d_backward", { grad_output, self, weight, finput, fgrad_input }, { grad_input, grad_weight, grad_bias } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
Tensor VariableType::thnn_conv_dilated2d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated2d"); | |
auto output = Type::thnn_conv_dilated2d(self, weight, kernel_size, bias, stride, padding, dilation); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d", { self, weight, bias }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated2d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
std::shared_ptr<ThnnConvDilated2DBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<ThnnConvDilated2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
Tensor output, columns, ones; | |
std::tie(output, columns, ones) = as_variable(baseType->thnn_conv_dilated2d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_forward", { self, weight, bias }, { output, columns, ones } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
if (grad_fn) { | |
grad_fn->columns_ = SavedVariable(columns, true); | |
grad_fn->ones_ = SavedVariable(ones, true); | |
} | |
return std::make_tuple(std::move(output), std::move(columns), std::move(ones)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated2d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& columns_ = unpack(columns, "columns", 7); | |
auto& ones_ = unpack(ones, "ones", 8); | |
check_no_requires_grad(columns, "columns"); | |
check_no_requires_grad(ones, "ones"); | |
std::shared_ptr<ThnnConvDilated2DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self, weight })) { | |
grad_fn = std::make_shared<ThnnConvDilated2DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
Tensor grad_input, grad_weight, grad_bias; | |
std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv_dilated2d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_, output_mask)); | |
set_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, columns, ones )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated2d_backward", { grad_output, self, weight, columns, ones }, { grad_input, grad_weight, grad_bias } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
Tensor VariableType::thnn_conv_dilated3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated3d"); | |
auto output = Type::thnn_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d", { self, weight, bias }, { output } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated3d_forward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 3); | |
std::shared_ptr<ThnnConvDilated3DBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<ThnnConvDilated3DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kernel_size = kernel_size; | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
Tensor output, columns, ones; | |
std::tie(output, columns, ones) = as_variable(baseType->thnn_conv_dilated3d_forward(self_, weight_, kernel_size, bias_, stride, padding, dilation)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_forward", { self, weight, bias }, { output, columns, ones } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
} | |
if (grad_fn) { | |
grad_fn->columns_ = SavedVariable(columns, true); | |
grad_fn->ones_ = SavedVariable(ones, true); | |
} | |
return std::make_tuple(std::move(output), std::move(columns), std::move(ones)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("thnn_conv_dilated3d_backward"); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& columns_ = unpack(columns, "columns", 7); | |
auto& ones_ = unpack(ones, "ones", 8); | |
check_no_requires_grad(columns, "columns"); | |
check_no_requires_grad(ones, "ones"); | |
std::shared_ptr<ThnnConvDilated3DBackwardBackward> grad_fn; | |
if (compute_requires_grad({ grad_output, self, weight })) { | |
grad_fn = std::make_shared<ThnnConvDilated3DBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ grad_output, self, weight }); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->stride = stride; | |
grad_fn->padding = padding; | |
grad_fn->dilation = dilation; | |
} | |
Tensor grad_input, grad_weight, grad_bias; | |
std::tie(grad_input, grad_weight, grad_bias) = as_variable(baseType->thnn_conv_dilated3d_backward(grad_output_, self_, weight_, kernel_size, stride, padding, dilation, columns_, ones_, output_mask)); | |
set_history({ grad_input, grad_weight, grad_bias }, grad_fn); | |
if (jit::tracer::isTracing( grad_output, self, weight, columns, ones )) { | |
jit::Node *n = jit::tracer::recordTrace( "thnn_conv_dilated3d_backward", { grad_output, self, weight, columns, ones }, { grad_input, grad_weight, grad_bias } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(grad_input), std::move(grad_weight), std::move(grad_bias)); | |
} | |
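// The remaining methods are thin pass-throughs: they call the base Type::
// implementation directly, which is expected to decompose into operations that
// are themselves differentiable and traceable, so no grad_fn is constructed here
// and the wrappers only record tracing attributes. allclose returns a plain bool
// and is therefore neither traced nor given history; convolution likewise only
// carries a profiler entry.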
Tensor VariableType::adaptive_avg_pool1d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_avg_pool1d"); | |
auto result = Type::adaptive_avg_pool1d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_avg_pool1d", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor> VariableType::adaptive_max_pool1d(const Tensor & self, IntList output_size) const { | |
profiler::RecordFunction profiler("adaptive_max_pool1d"); | |
Tensor result0, result1; | |
std::tie(result0, result1) = Type::adaptive_max_pool1d(self, output_size); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "adaptive_max_pool1d", { self }, { result0, result1 } ); | |
setattr(n, jit::stringToSymbol("output_size"), output_size); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1)); | |
} | |
bool VariableType::allclose(const Tensor & self, const Tensor & other, double rtol, double atol) const { | |
profiler::RecordFunction profiler("allclose"); | |
auto result = Type::allclose(self, other, rtol, atol); | |
return result; | |
} | |
Tensor VariableType::batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) const { | |
profiler::RecordFunction profiler("batch_norm"); | |
auto result = Type::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); | |
if (jit::tracer::isTracing( input, weight, bias, running_mean, running_var )) { | |
jit::Node *n = jit::tracer::recordTrace( "batch_norm", { input, weight, bias, running_mean, running_var }, { result } ); | |
setattr(n, jit::stringToSymbol("training"), training); | |
setattr(n, jit::stringToSymbol("momentum"), momentum); | |
setattr(n, jit::stringToSymbol("eps"), eps); | |
setattr(n, jit::stringToSymbol("cudnn_enabled"), cudnn_enabled); | |
} | |
return result; | |
} | |
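// In-place sampling ops such as bernoulli_ simply forward to the generic
// Type:: implementation; the wrapper adds a profiler entry but records no
// trace and builds no grad_fn.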
Tensor & VariableType::bernoulli_(Tensor & self, const Tensor & p, Generator * generator) const { | |
profiler::RecordFunction profiler("bernoulli_"); | |
Type::bernoulli_(self, p, generator); | |
return self; | |
} | |
Tensor & VariableType::bernoulli_(Tensor & self, double p, Generator * generator) const { | |
profiler::RecordFunction profiler("bernoulli_"); | |
Type::bernoulli_(self, p, generator); | |
return self; | |
} | |
std::vector<Tensor> VariableType::chunk(const Tensor & self, int64_t chunks, int64_t dim) const { | |
profiler::RecordFunction profiler("chunk"); | |
auto result = Type::chunk(self, chunks, dim); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "chunk", { self }, flatten(result) ); | |
setattr(n, jit::stringToSymbol("chunks"), chunks); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
Tensor VariableType::convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups) const { | |
profiler::RecordFunction profiler("convolution"); | |
auto result = Type::convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); | |
return result; | |
} | |
Tensor VariableType::_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) const { | |
profiler::RecordFunction profiler("_convolution"); | |
auto result = Type::_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled); | |
if (jit::tracer::isTracing( input, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "_convolution", { input, weight, bias }, { result } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("transposed"), transposed); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
setattr(n, jit::stringToSymbol("cudnn_enabled"), cudnn_enabled); | |
} | |
return result; | |
} | |
Tensor VariableType::_convolution_nogroup(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding) const { | |
profiler::RecordFunction profiler("_convolution_nogroup"); | |
auto result = Type::_convolution_nogroup(input, weight, bias, stride, padding, dilation, transposed, output_padding); | |
if (jit::tracer::isTracing( input, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "_convolution_nogroup", { input, weight, bias }, { result } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("transposed"), transposed); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::_convolution_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntList stride, IntList padding, IntList dilation, bool transposed, IntList output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("_convolution_double_backward"); | |
Tensor result0, result1, result2; | |
std::tie(result0, result1, result2) = Type::_convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, output_mask); | |
if (jit::tracer::isTracing( ggI, ggW, ggb, gO, weight, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_convolution_double_backward", { ggI, ggW, ggb, gO, weight, self }, { result0, result1, result2 } ); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("transposed"), transposed); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
setattr(n, jit::stringToSymbol("cudnn_enabled"), cudnn_enabled); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2)); | |
} | |
Tensor VariableType::conv1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) const { | |
profiler::RecordFunction profiler("conv1d"); | |
auto result = Type::conv1d(input, weight, bias, stride, padding, dilation, groups); | |
return result; | |
} | |
Tensor VariableType::conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) const { | |
profiler::RecordFunction profiler("conv2d"); | |
auto result = Type::conv2d(input, weight, bias, stride, padding, dilation, groups); | |
return result; | |
} | |
Tensor VariableType::conv3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList dilation, int64_t groups) const { | |
profiler::RecordFunction profiler("conv3d"); | |
auto result = Type::conv3d(input, weight, bias, stride, padding, dilation, groups); | |
return result; | |
} | |
Tensor VariableType::conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad) const { | |
profiler::RecordFunction profiler("conv_tbc"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto& bias_ = unpack(bias, "bias", 2); | |
std::shared_ptr<ConvTbcBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<ConvTbcBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->bias_ = SavedVariable(bias, false); | |
grad_fn->pad = pad; | |
} | |
auto result = as_variable(baseType->conv_tbc(self_, weight_, bias_, pad)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "conv_tbc", { self, weight, bias }, { result } ); | |
setattr(n, jit::stringToSymbol("pad"), pad); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad) const { | |
profiler::RecordFunction profiler("conv_tbc_backward"); | |
Tensor result0, result1, result2; | |
std::tie(result0, result1, result2) = Type::conv_tbc_backward(self, input, weight, bias, pad); | |
if (jit::tracer::isTracing( self, input, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "conv_tbc_backward", { self, input, weight, bias }, { result0, result1, result2 } ); | |
setattr(n, jit::stringToSymbol("pad"), pad); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2)); | |
} | |
Tensor VariableType::conv_transpose1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) const { | |
profiler::RecordFunction profiler("conv_transpose1d"); | |
auto result = Type::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation); | |
return result; | |
} | |
Tensor VariableType::conv_transpose2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) const { | |
profiler::RecordFunction profiler("conv_transpose2d"); | |
auto result = Type::conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation); | |
return result; | |
} | |
Tensor VariableType::conv_transpose3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, int64_t groups, IntList dilation) const { | |
profiler::RecordFunction profiler("conv_transpose3d"); | |
auto result = Type::conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation); | |
return result; | |
} | |
Tensor VariableType::cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) const { | |
profiler::RecordFunction profiler("cudnn_affine_grid_generator"); | |
auto& theta_ = unpack(theta, "theta", 0); | |
std::shared_ptr<CudnnAffineGridGeneratorBackward> grad_fn; | |
if (compute_requires_grad({ theta })) { | |
grad_fn = std::make_shared<CudnnAffineGridGeneratorBackward>(); | |
grad_fn->next_functions = compute_next_functions({ theta }); | |
grad_fn->N = N; | |
grad_fn->C = C; | |
grad_fn->H = H; | |
grad_fn->W = W; | |
} | |
auto grid = as_variable(baseType->cudnn_affine_grid_generator(theta_, N, C, H, W)); | |
set_history(grid, grad_fn); | |
if (jit::tracer::isTracing( theta )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_affine_grid_generator", { theta }, { grid } ); | |
setattr(n, jit::stringToSymbol("N"), N); | |
setattr(n, jit::stringToSymbol("C"), C); | |
setattr(n, jit::stringToSymbol("H"), H); | |
setattr(n, jit::stringToSymbol("W"), W); | |
} | |
return grid; | |
} | |
Tensor VariableType::cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) const { | |
profiler::RecordFunction profiler("cudnn_affine_grid_generator_backward"); | |
auto grad_theta = Type::cudnn_affine_grid_generator_backward(grad, N, C, H, W); | |
if (jit::tracer::isTracing( grad )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_affine_grid_generator_backward", { grad }, { grad_theta } ); | |
setattr(n, jit::stringToSymbol("N"), N); | |
setattr(n, jit::stringToSymbol("C"), C); | |
setattr(n, jit::stringToSymbol("H"), H); | |
setattr(n, jit::stringToSymbol("W"), W); | |
} | |
return grad_theta; | |
} | |
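// cudnn_batch_norm stores two of its own outputs (presumably the saved mean
// and saved variance statistics) into the grad_fn *after* the forward call,
// because the backward formula needs them; SavedVariable(..., true) marks
// them as outputs of this node rather than inputs.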
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon) const { | |
profiler::RecordFunction profiler("cudnn_batch_norm"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
auto& running_mean_ = unpack(running_mean, "running_mean", 3); | |
auto& running_var_ = unpack(running_var, "running_var", 4); | |
check_no_requires_grad(running_mean, "running_mean"); | |
check_no_requires_grad(running_var, "running_var"); | |
std::shared_ptr<CudnnBatchNormBackward> grad_fn; | |
if (compute_requires_grad({ input, weight, bias })) { | |
grad_fn = std::make_shared<CudnnBatchNormBackward>(); | |
grad_fn->next_functions = compute_next_functions({ input, weight, bias }); | |
grad_fn->input_ = SavedVariable(input, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->running_mean_ = SavedVariable(running_mean, false); | |
grad_fn->running_var_ = SavedVariable(running_var, false); | |
grad_fn->training = training; | |
grad_fn->epsilon = epsilon; | |
} | |
Tensor result0, result1, result2; | |
std::tie(result0, result1, result2) = as_variable(baseType->cudnn_batch_norm(input_, weight_, bias_, running_mean_, running_var_, training, exponential_average_factor, epsilon)); | |
set_history({ result0, result1, result2 }, grad_fn); | |
if (jit::tracer::isTracing( input, weight, bias, running_mean, running_var )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_batch_norm", { input, weight, bias, running_mean, running_var }, { result0, result1, result2 } ); | |
setattr(n, jit::stringToSymbol("training"), training); | |
setattr(n, jit::stringToSymbol("exponential_average_factor"), exponential_average_factor); | |
setattr(n, jit::stringToSymbol("epsilon"), epsilon); | |
} | |
if (grad_fn) { | |
grad_fn->result1_ = SavedVariable(result1, true); | |
grad_fn->result2_ = SavedVariable(result2, true); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2)); | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon) const { | |
profiler::RecordFunction profiler("cudnn_batch_norm_backward"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
auto& running_mean_ = unpack(running_mean, "running_mean", 3); | |
auto& running_var_ = unpack(running_var, "running_var", 4); | |
auto save_mean_ = unpack_opt(save_mean, "save_mean", 5); | |
auto save_var_ = unpack_opt(save_var, "save_var", 6); | |
check_no_requires_grad(running_mean, "running_mean"); | |
check_no_requires_grad(running_var, "running_var"); | |
std::shared_ptr<CudnnBatchNormBackwardBackward> grad_fn; | |
if (compute_requires_grad({ input, grad_output, weight, save_mean, save_var })) { | |
grad_fn = std::make_shared<CudnnBatchNormBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ input, grad_output, weight, save_mean, save_var }); | |
grad_fn->input_ = SavedVariable(input, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->running_mean_ = SavedVariable(running_mean, false); | |
grad_fn->running_var_ = SavedVariable(running_var, false); | |
grad_fn->save_mean_ = SavedVariable(save_mean, false); | |
grad_fn->save_var_ = SavedVariable(save_var, false); | |
grad_fn->epsilon = epsilon; | |
} | |
Tensor result0, result1, result2; | |
std::tie(result0, result1, result2) = as_variable(baseType->cudnn_batch_norm_backward(input_, grad_output_, weight_, running_mean_, running_var_, save_mean_, save_var_, epsilon)); | |
set_history({ result0, result1, result2 }, grad_fn); | |
if (jit::tracer::isTracing( input, grad_output, weight, running_mean, running_var, save_mean, save_var )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_batch_norm_backward", { input, grad_output, weight, running_mean, running_var, save_mean, save_var }, { result0, result1, result2 } ); | |
setattr(n, jit::stringToSymbol("epsilon"), epsilon); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2)); | |
} | |
Tensor VariableType::cudnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
std::shared_ptr<CudnnConvolutionBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<CudnnConvolutionBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->padding = padding; | |
grad_fn->stride = stride; | |
grad_fn->dilation = dilation; | |
grad_fn->groups = groups; | |
grad_fn->benchmark = benchmark; | |
grad_fn->deterministic = deterministic; | |
} | |
auto result = as_variable(baseType->cudnn_convolution(self_, weight_, bias_, padding, stride, dilation, groups, benchmark, deterministic)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution", { self, weight, bias }, { result } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return result; | |
} | |
Tensor VariableType::cudnn_convolution_backward_input(IntList self_size, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_backward_input"); | |
auto result = Type::cudnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); | |
if (jit::tracer::isTracing( grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_input", { grad_output, weight }, { result } ); | |
setattr(n, jit::stringToSymbol("self_size"), self_size); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("cudnn_convolution_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
std::shared_ptr<CudnnConvolutionBackwardBackward> grad_fn; | |
if (compute_requires_grad({ self, grad_output, weight })) { | |
grad_fn = std::make_shared<CudnnConvolutionBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, grad_output, weight }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->padding = padding; | |
grad_fn->stride = stride; | |
grad_fn->dilation = dilation; | |
grad_fn->groups = groups; | |
grad_fn->benchmark = benchmark; | |
grad_fn->deterministic = deterministic; | |
} | |
Tensor result0, result1, result2; | |
std::tie(result0, result1, result2) = as_variable(baseType->cudnn_convolution_backward(self_, grad_output_, weight_, padding, stride, dilation, groups, benchmark, deterministic, output_mask)); | |
set_history({ result0, result1, result2 }, grad_fn); | |
if (jit::tracer::isTracing( self, grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward", { self, grad_output, weight }, { result0, result1, result2 } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2)); | |
} | |
Tensor VariableType::cudnn_convolution_backward_bias(const Tensor & grad_output) const { | |
profiler::RecordFunction profiler("cudnn_convolution_backward_bias"); | |
auto result = Type::cudnn_convolution_backward_bias(grad_output); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_bias", { grad_output }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::cudnn_convolution_backward_weight(IntList weight_size, const Tensor & grad_output, const Tensor & self, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_backward_weight"); | |
auto result = Type::cudnn_convolution_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_backward_weight", { grad_output, self }, { result } ); | |
setattr(n, jit::stringToSymbol("weight_size"), weight_size); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return result; | |
} | |
Tensor VariableType::cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
std::shared_ptr<CudnnConvolutionTransposeBackward> grad_fn; | |
if (compute_requires_grad({ self, weight, bias })) { | |
grad_fn = std::make_shared<CudnnConvolutionTransposeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, weight, bias }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->stride = stride; | |
grad_fn->dilation = dilation; | |
grad_fn->groups = groups; | |
grad_fn->benchmark = benchmark; | |
grad_fn->deterministic = deterministic; | |
} | |
auto result = as_variable(baseType->cudnn_convolution_transpose(self_, weight_, bias_, padding, output_padding, stride, dilation, groups, benchmark, deterministic)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose", { self, weight, bias }, { result } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::cudnn_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntList padding, IntList output_padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 1); | |
auto& weight_ = unpack(weight, "weight", 2); | |
std::shared_ptr<CudnnConvolutionTransposeBackwardBackward> grad_fn; | |
if (compute_requires_grad({ self, grad_output, weight })) { | |
grad_fn = std::make_shared<CudnnConvolutionTransposeBackwardBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, grad_output, weight }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->grad_output_ = SavedVariable(grad_output, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->padding = padding; | |
grad_fn->output_padding = output_padding; | |
grad_fn->stride = stride; | |
grad_fn->dilation = dilation; | |
grad_fn->groups = groups; | |
grad_fn->benchmark = benchmark; | |
grad_fn->deterministic = deterministic; | |
} | |
Tensor result0, result1, result2; | |
std::tie(result0, result1, result2) = as_variable(baseType->cudnn_convolution_transpose_backward(self_, grad_output_, weight_, padding, output_padding, stride, dilation, groups, benchmark, deterministic, output_mask)); | |
set_history({ result0, result1, result2 }, grad_fn); | |
if (jit::tracer::isTracing( self, grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward", { self, grad_output, weight }, { result0, result1, result2 } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("output_padding"), output_padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2)); | |
} | |
Tensor VariableType::cudnn_convolution_transpose_backward_bias(const Tensor & grad_output) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward_bias"); | |
auto result = Type::cudnn_convolution_transpose_backward_bias(grad_output); | |
if (jit::tracer::isTracing( grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_bias", { grad_output }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::cudnn_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward_input"); | |
auto result = Type::cudnn_convolution_transpose_backward_input(grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); | |
if (jit::tracer::isTracing( grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_input", { grad_output, weight }, { result } ); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return result; | |
} | |
Tensor VariableType::cudnn_convolution_transpose_backward_weight(IntList weight_size, const Tensor & grad_output, const Tensor & self, IntList padding, IntList stride, IntList dilation, int64_t groups, bool benchmark, bool deterministic) const { | |
profiler::RecordFunction profiler("cudnn_convolution_transpose_backward_weight"); | |
auto result = Type::cudnn_convolution_transpose_backward_weight(weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); | |
if (jit::tracer::isTracing( grad_output, self )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_convolution_transpose_backward_weight", { grad_output, self }, { result } ); | |
setattr(n, jit::stringToSymbol("weight_size"), weight_size); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("groups"), groups); | |
setattr(n, jit::stringToSymbol("benchmark"), benchmark); | |
setattr(n, jit::stringToSymbol("deterministic"), deterministic); | |
} | |
return result; | |
} | |
Tensor VariableType::cudnn_grid_sampler(const Tensor & self, const Tensor & grid) const { | |
profiler::RecordFunction profiler("cudnn_grid_sampler"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& grid_ = unpack(grid, "grid", 1); | |
std::shared_ptr<CudnnGridSamplerBackward> grad_fn; | |
if (compute_requires_grad({ self, grid })) { | |
grad_fn = std::make_shared<CudnnGridSamplerBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self, grid }); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->grid_ = SavedVariable(grid, false); | |
} | |
auto output = as_variable(baseType->cudnn_grid_sampler(self_, grid_)); | |
set_history(output, grad_fn); | |
if (jit::tracer::isTracing( self, grid )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_grid_sampler", { self, grid }, { output } ); | |
(void)n; | |
} | |
return output; | |
} | |
std::tuple<Tensor,Tensor> VariableType::cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output) const { | |
profiler::RecordFunction profiler("cudnn_grid_sampler_backward"); | |
Tensor grad_self, grad_grid; | |
std::tie(grad_self, grad_grid) = Type::cudnn_grid_sampler_backward(self, grid, grad_output); | |
if (jit::tracer::isTracing( self, grid, grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "cudnn_grid_sampler_backward", { self, grid, grad_output }, { grad_self, grad_grid } ); | |
(void)n; | |
} | |
return std::make_tuple(std::move(grad_self), std::move(grad_grid)); | |
} | |
Tensor VariableType::det(const Tensor & self) const { | |
profiler::RecordFunction profiler("det"); | |
auto result = Type::det(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "det", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor,Tensor,Tensor> VariableType::_det_with_svd(const Tensor & self) const { | |
profiler::RecordFunction profiler("_det_with_svd"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<DetWithSvdBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<DetWithSvdBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_ = SavedVariable(self, false); | |
} | |
Tensor result0, result1, result2, result3; | |
std::tie(result0, result1, result2, result3) = as_variable(baseType->_det_with_svd(self_)); | |
set_history({ result0, result1, result2, result3 }, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "_det_with_svd", { self }, { result0, result1, result2, result3 } ); | |
(void)n; | |
} | |
if (grad_fn) { | |
grad_fn->result0_ = SavedVariable(result0, true); | |
grad_fn->result1_ = SavedVariable(result1, true); | |
grad_fn->result2_ = SavedVariable(result2, true); | |
grad_fn->result3_ = SavedVariable(result3, true); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2), std::move(result3)); | |
} | |
Tensor VariableType::embedding(const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const { | |
profiler::RecordFunction profiler("embedding"); | |
auto& weight_ = unpack(weight, "weight", 0); | |
auto& indices_ = unpack_long(indices, "indices", 1); | |
std::shared_ptr<EmbeddingBackward> grad_fn; | |
if (compute_requires_grad({ weight })) { | |
grad_fn = std::make_shared<EmbeddingBackward>(); | |
grad_fn->next_functions = compute_next_functions({ weight }); | |
grad_fn->weight_argsize_0 = weight.size(0); | |
grad_fn->indices_ = SavedVariable(indices, false); | |
grad_fn->padding_idx = padding_idx; | |
grad_fn->scale_grad_by_freq = scale_grad_by_freq; | |
grad_fn->sparse = sparse; | |
} | |
auto result = as_variable(baseType->embedding(weight_, indices_, padding_idx, scale_grad_by_freq, sparse)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( weight, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "embedding", { weight, indices }, { result } ); | |
setattr(n, jit::stringToSymbol("padding_idx"), padding_idx); | |
setattr(n, jit::stringToSymbol("scale_grad_by_freq"), scale_grad_by_freq); | |
setattr(n, jit::stringToSymbol("sparse"), sparse); | |
} | |
return result; | |
} | |
Tensor VariableType::embedding_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) const { | |
profiler::RecordFunction profiler("embedding_backward"); | |
auto result = Type::embedding_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse); | |
if (jit::tracer::isTracing( grad, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "embedding_backward", { grad, indices }, { result } ); | |
setattr(n, jit::stringToSymbol("num_weights"), num_weights); | |
setattr(n, jit::stringToSymbol("padding_idx"), padding_idx); | |
setattr(n, jit::stringToSymbol("scale_grad_by_freq"), scale_grad_by_freq); | |
setattr(n, jit::stringToSymbol("sparse"), sparse); | |
} | |
return result; | |
} | |
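// When a derivative is not implemented, the codegen installs an Error node as
// the grad_fn: the forward still runs, but backpropagating through the result
// raises the stored message.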
Tensor VariableType::embedding_dense_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) const { | |
profiler::RecordFunction profiler("embedding_dense_backward"); | |
auto& grad_ = unpack(grad, "grad", 0); | |
auto& indices_ = unpack_long(indices, "indices", 1); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ grad, indices })) { | |
grad_fn = std::make_shared<Error>("the derivative for embedding_dense_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ grad, indices }); | |
} | |
auto result = as_variable(baseType->embedding_dense_backward(grad_, indices_, num_weights, padding_idx, scale_grad_by_freq)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( grad, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "embedding_dense_backward", { grad, indices }, { result } ); | |
setattr(n, jit::stringToSymbol("num_weights"), num_weights); | |
setattr(n, jit::stringToSymbol("padding_idx"), padding_idx); | |
setattr(n, jit::stringToSymbol("scale_grad_by_freq"), scale_grad_by_freq); | |
} | |
return result; | |
} | |
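// embedding_renorm_ shows the in-place pattern for tracked ops:
// check_inplace() rejects illegal in-place modification (e.g. of a leaf that
// requires grad), increment_version() bumps the variable's version counter so
// stale SavedVariables can be detected, and rebase_history() swaps the
// variable's grad_fn for the new node instead of calling set_history().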
Tensor & VariableType::embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) const { | |
profiler::RecordFunction profiler("embedding_renorm_"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& indices_ = unpack_long(indices, "indices", 1); | |
check_inplace(self); | |
std::shared_ptr<EmbeddingRenormBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<EmbeddingRenormBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
baseType->embedding_renorm_(self_, indices_, max_norm, norm_type); | |
increment_version(self); | |
rebase_history(self, grad_fn); | |
if (jit::tracer::isTracing( self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "embedding_renorm", { self, indices }, { self } ); | |
setattr(n, jit::stringToSymbol("max_norm"), max_norm); | |
setattr(n, jit::stringToSymbol("norm_type"), norm_type); | |
} | |
return self; | |
} | |
Tensor VariableType::embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) const { | |
profiler::RecordFunction profiler("embedding_sparse_backward"); | |
auto result = Type::embedding_sparse_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq); | |
if (jit::tracer::isTracing( grad, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "embedding_sparse_backward", { grad, indices }, { result } ); | |
setattr(n, jit::stringToSymbol("num_weights"), num_weights); | |
setattr(n, jit::stringToSymbol("padding_idx"), padding_idx); | |
setattr(n, jit::stringToSymbol("scale_grad_by_freq"), scale_grad_by_freq); | |
} | |
return result; | |
} | |
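// expand (like permute and squeeze further down) wraps its result with
// as_view() instead of as_variable(): the output is a view sharing storage
// with self, and is registered as such for autograd.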
Tensor VariableType::expand(const Tensor & self, IntList size) const { | |
profiler::RecordFunction profiler("expand"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<ExpandBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<ExpandBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto result = as_view(self, baseType->expand(self_, size)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "expand", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("size"), size); | |
} | |
return result; | |
} | |
Tensor VariableType::expand_as(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("expand_as"); | |
auto result = Type::expand_as(self, other); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "expand_as", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::index(const Tensor & self, TensorList indices) const { | |
profiler::RecordFunction profiler("index"); | |
auto result = Type::index(self, indices); | |
if (jit::tracer::isTracing( self, indices )) { | |
jit::Node *n = jit::tracer::recordTrace( "index", flatten( self, indices ), { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::index_put_(Tensor & self, TensorList indices, const Tensor & values) const { | |
profiler::RecordFunction profiler("index_put_"); | |
Type::index_put_(self, indices, values); | |
if (jit::tracer::isTracing( self, indices, values )) { | |
jit::Node *n = jit::tracer::recordTrace( "index_put", flatten( self, indices, values ), { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
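// Scalar/bool queries such as is_cuda, is_distributed and is_same_size are
// pure metadata lookups: no tracing and no autograd bookkeeping (is_nonzero
// adds only a profiler entry).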
bool VariableType::is_cuda(const Tensor & self) const { | |
auto result = Type::is_cuda(self); | |
return result; | |
} | |
bool VariableType::is_distributed(const Tensor & self) const { | |
auto result = Type::is_distributed(self); | |
return result; | |
} | |
bool VariableType::is_nonzero(const Tensor & self) const { | |
profiler::RecordFunction profiler("is_nonzero"); | |
auto result = Type::is_nonzero(self); | |
return result; | |
} | |
bool VariableType::is_same_size(const Tensor & self, const Tensor & other) const { | |
auto result = Type::is_same_size(self, other); | |
return result; | |
} | |
bool VariableType::is_signed(const Tensor & self) const { | |
auto result = Type::is_signed(self); | |
return result; | |
} | |
bool VariableType::is_sparse(const Tensor & self) const { | |
auto result = Type::is_sparse(self); | |
return result; | |
} | |
Tensor VariableType::matmul(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("matmul"); | |
auto result = Type::matmul(self, other); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "matmul", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor> VariableType::max_pool1d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) const { | |
profiler::RecordFunction profiler("max_pool1d"); | |
Tensor result0, result1; | |
std::tie(result0, result1) = Type::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "max_pool1d", { self }, { result0, result1 } ); | |
setattr(n, jit::stringToSymbol("kernel_size"), kernel_size); | |
setattr(n, jit::stringToSymbol("stride"), stride); | |
setattr(n, jit::stringToSymbol("padding"), padding); | |
setattr(n, jit::stringToSymbol("dilation"), dilation); | |
setattr(n, jit::stringToSymbol("ceil_mode"), ceil_mode); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1)); | |
} | |
Tensor VariableType::narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length) const { | |
profiler::RecordFunction profiler("narrow"); | |
auto result = Type::narrow(self, dim, start, length); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "narrow", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("start"), start); | |
setattr(n, jit::stringToSymbol("length"), length); | |
} | |
return result; | |
} | |
Tensor VariableType::nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t kW, int64_t kH, int64_t padW, int64_t padH) const { | |
profiler::RecordFunction profiler("nnpack_spatial_convolution"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& weight_ = unpack(weight, "weight", 1); | |
auto bias_ = unpack_opt(bias, "bias", 2); | |
std::shared_ptr<NnpackSpatialConvolutionBackward> grad_fn; | |
if (compute_requires_grad({ input, weight, bias })) { | |
grad_fn = std::make_shared<NnpackSpatialConvolutionBackward>(); | |
grad_fn->next_functions = compute_next_functions({ input, weight, bias }); | |
grad_fn->input_ = SavedVariable(input, false); | |
grad_fn->weight_ = SavedVariable(weight, false); | |
grad_fn->kW = kW; | |
grad_fn->kH = kH; | |
grad_fn->padW = padW; | |
grad_fn->padH = padH; | |
grad_fn->weight_sizes = weight.sizes(); | |
} | |
auto result = as_variable(baseType->nnpack_spatial_convolution(input_, weight_, bias_, kW, kH, padW, padH)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( input, weight, bias )) { | |
jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution", { input, weight, bias }, { result } ); | |
setattr(n, jit::stringToSymbol("kW"), kW); | |
setattr(n, jit::stringToSymbol("kH"), kH); | |
setattr(n, jit::stringToSymbol("padW"), padW); | |
setattr(n, jit::stringToSymbol("padH"), padH); | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor,Tensor> VariableType::nnpack_spatial_convolution_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, int64_t kW, int64_t kH, int64_t padW, int64_t padH, std::array<bool,3> output_mask) const { | |
profiler::RecordFunction profiler("nnpack_spatial_convolution_backward"); | |
Tensor result0, result1, result2; | |
std::tie(result0, result1, result2) = Type::nnpack_spatial_convolution_backward(input, grad_output, weight, kW, kH, padW, padH, output_mask); | |
if (jit::tracer::isTracing( input, grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward", { input, grad_output, weight }, { result0, result1, result2 } ); | |
setattr(n, jit::stringToSymbol("kW"), kW); | |
setattr(n, jit::stringToSymbol("kH"), kH); | |
setattr(n, jit::stringToSymbol("padW"), padW); | |
setattr(n, jit::stringToSymbol("padH"), padH); | |
setattr(n, jit::stringToSymbol("output_mask"), output_mask); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2)); | |
} | |
Tensor VariableType::nnpack_spatial_convolution_backward_input(const Tensor & input, const Tensor & grad_output, const Tensor & weight, int64_t kW, int64_t kH, int64_t padW, int64_t padH) const { | |
profiler::RecordFunction profiler("nnpack_spatial_convolution_backward_input"); | |
auto result = Type::nnpack_spatial_convolution_backward_input(input, grad_output, weight, kW, kH, padW, padH); | |
if (jit::tracer::isTracing( input, grad_output, weight )) { | |
jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward_input", { input, grad_output, weight }, { result } ); | |
setattr(n, jit::stringToSymbol("kW"), kW); | |
setattr(n, jit::stringToSymbol("kH"), kH); | |
setattr(n, jit::stringToSymbol("padW"), padW); | |
setattr(n, jit::stringToSymbol("padH"), padH); | |
} | |
return result; | |
} | |
Tensor VariableType::nnpack_spatial_convolution_backward_weight(const Tensor & input, IntList weight_size, const Tensor & grad_output, int64_t kW, int64_t kH, int64_t padW, int64_t padH) const { | |
profiler::RecordFunction profiler("nnpack_spatial_convolution_backward_weight"); | |
auto result = Type::nnpack_spatial_convolution_backward_weight(input, weight_size, grad_output, kW, kH, padW, padH); | |
if (jit::tracer::isTracing( input, grad_output )) { | |
jit::Node *n = jit::tracer::recordTrace( "nnpack_spatial_convolution_backward_weight", { input, grad_output }, { result } ); | |
setattr(n, jit::stringToSymbol("weight_size"), weight_size); | |
setattr(n, jit::stringToSymbol("kW"), kW); | |
setattr(n, jit::stringToSymbol("kH"), kH); | |
setattr(n, jit::stringToSymbol("padW"), padW); | |
setattr(n, jit::stringToSymbol("padH"), padH); | |
} | |
return result; | |
} | |
Tensor VariableType::permute(const Tensor & self, IntList dims) const { | |
profiler::RecordFunction profiler("permute"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<PermuteBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<PermuteBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dims = dims; | |
} | |
auto result = as_view(self, baseType->permute(self_, dims)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "permute", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dims"), dims); | |
} | |
return result; | |
} | |
Tensor VariableType::pin_memory(const Tensor & self) const { | |
profiler::RecordFunction profiler("pin_memory"); | |
auto result = Type::pin_memory(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "pin_memory", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
std::tuple<Tensor,Tensor> VariableType::RoiPooling2d_forward(const Tensor & input, const Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale) const { | |
profiler::RecordFunction profiler("RoiPooling2d_forward"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& rois_ = unpack(rois, "rois", 1); | |
check_no_requires_grad(rois, "rois"); | |
std::shared_ptr<Roipooling2DBackward> grad_fn; | |
if (compute_requires_grad({ input })) { | |
grad_fn = std::make_shared<Roipooling2DBackward>(); | |
grad_fn->next_functions = compute_next_functions({ input }); | |
grad_fn->input_ = SavedVariable(input, false); | |
grad_fn->rois_ = SavedVariable(rois, false); | |
grad_fn->pooledHeight = pooledHeight; | |
grad_fn->pooledWidth = pooledWidth; | |
grad_fn->spatialScale = spatialScale; | |
} | |
Tensor result0, result1; | |
std::tie(result0, result1) = as_variable(baseType->RoiPooling2d_forward(input_, rois_, pooledHeight, pooledWidth, spatialScale)); | |
set_history(result0, grad_fn); | |
if (jit::tracer::isTracing( input, rois )) { | |
jit::Node *n = jit::tracer::recordTrace( "RoiPooling2d_forward", { input, rois }, { result0, result1 } ); | |
setattr(n, jit::stringToSymbol("pooledHeight"), pooledHeight); | |
setattr(n, jit::stringToSymbol("pooledWidth"), pooledWidth); | |
setattr(n, jit::stringToSymbol("spatialScale"), spatialScale); | |
} | |
if (grad_fn) { | |
grad_fn->result1_ = SavedVariable(result1, true); | |
} | |
return std::make_tuple(std::move(result0), std::move(result1)); | |
} | |
Tensor VariableType::RoiPooling2d_backward(const Tensor & input, const Tensor & rois, int64_t pooledHeight, int64_t pooledWidth, double spatialScale, const Tensor & gradOutput, const Tensor & argmaxes) const { | |
profiler::RecordFunction profiler("RoiPooling2d_backward"); | |
auto& input_ = unpack(input, "input", 0); | |
auto& rois_ = unpack(rois, "rois", 1); | |
auto& gradOutput_ = unpack(gradOutput, "gradOutput", 5); | |
auto& argmaxes_ = unpack(argmaxes, "argmaxes", 6); | |
std::shared_ptr<Error> grad_fn; | |
if (compute_requires_grad({ input, rois, gradOutput, argmaxes })) { | |
grad_fn = std::make_shared<Error>("the derivative for RoiPooling2d_backward is not implemented"); | |
grad_fn->next_functions = compute_next_functions({ input, rois, gradOutput, argmaxes }); | |
} | |
auto result = as_variable(baseType->RoiPooling2d_backward(input_, rois_, pooledHeight, pooledWidth, spatialScale, gradOutput_, argmaxes_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( input, rois, gradOutput, argmaxes )) { | |
jit::Node *n = jit::tracer::recordTrace( "RoiPooling2d_backward", { input, rois, gradOutput, argmaxes }, { result } ); | |
setattr(n, jit::stringToSymbol("pooledHeight"), pooledHeight); | |
setattr(n, jit::stringToSymbol("pooledWidth"), pooledWidth); | |
setattr(n, jit::stringToSymbol("spatialScale"), spatialScale); | |
} | |
return result; | |
} | |
Tensor VariableType::rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu"); | |
auto result = Type::rrelu(self, lower, upper, training, generator); | |
return result; | |
} | |
Tensor & VariableType::rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) const { | |
profiler::RecordFunction profiler("rrelu_"); | |
Type::rrelu_(self, lower, upper, training, generator); | |
return self; | |
} | |
Tensor VariableType::select(const Tensor & self, int64_t dim, int64_t index) const { | |
profiler::RecordFunction profiler("select"); | |
auto result = Type::select(self, dim, index); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "select", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("index"), index); | |
} | |
return result; | |
} | |
Tensor VariableType::selu(const Tensor & self) const { | |
profiler::RecordFunction profiler("selu"); | |
auto result = Type::selu(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "selu", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor & VariableType::selu_(Tensor & self) const { | |
profiler::RecordFunction profiler("selu_"); | |
Type::selu_(self); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "selu", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
int64_t VariableType::size(const Tensor & self, int64_t dim) const { | |
auto result = Type::size(self, dim); | |
return result; | |
} | |
Tensor VariableType::slice(const Tensor & self, int64_t dim, int64_t start, int64_t end, int64_t step) const { | |
profiler::RecordFunction profiler("slice"); | |
auto result = Type::slice(self, dim, start, end, step); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "slice", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
setattr(n, jit::stringToSymbol("start"), start); | |
setattr(n, jit::stringToSymbol("end"), end); | |
setattr(n, jit::stringToSymbol("step"), step); | |
} | |
return result; | |
} | |
std::vector<Tensor> VariableType::split(const Tensor & self, int64_t split_size, int64_t dim) const { | |
profiler::RecordFunction profiler("split"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SplitBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SplitBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->self_ = SavedVariable(self, false); | |
grad_fn->split_size = split_size; | |
grad_fn->dim = dim; | |
} | |
auto result = as_variable(baseType->split(self_, split_size, dim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "split", { self }, flatten(result) ); | |
setattr(n, jit::stringToSymbol("split_size"), split_size); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
Tensor VariableType::squeeze(const Tensor & self) const { | |
profiler::RecordFunction profiler("squeeze"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SqueezeBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SqueezeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
auto result = as_view(self, baseType->squeeze(self_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::squeeze(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("squeeze"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<SqueezeBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SqueezeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->self_argsize_dim = self.size(dim); | |
grad_fn->dim = dim; | |
} | |
auto result = as_view(self, baseType->squeeze(self_, dim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
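// The in-place squeeze_/unsqueeze_ variants additionally call
// ensure_no_aten_scalars(); as far as this generated code shows, that guards
// against the result collapsing to a zero-dimensional ATen scalar after the
// shape change.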
Tensor & VariableType::squeeze_(Tensor & self) const { | |
profiler::RecordFunction profiler("squeeze_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SqueezeBackward0> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SqueezeBackward0>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
} | |
baseType->squeeze_(self_); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { self } ); | |
(void)n; | |
} | |
return self; | |
} | |
Tensor & VariableType::squeeze_(Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("squeeze_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<SqueezeBackward1> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<SqueezeBackward1>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->self_sizes = self.sizes(); | |
grad_fn->self_argsize_dim = self.size(dim); | |
grad_fn->dim = dim; | |
} | |
baseType->squeeze_(self_, dim); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "squeeze", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
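// Ops involving a TensorList (stack here, index above, split's output list)
// pass the list through flatten() when recording the trace, so each tensor in
// the list becomes an individual input or output of the trace node.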
Tensor VariableType::stack(TensorList tensors, int64_t dim) const { | |
profiler::RecordFunction profiler("stack"); | |
auto result = Type::stack(tensors, dim); | |
if (jit::tracer::isTracing( tensors )) { | |
jit::Node *n = jit::tracer::recordTrace( "stack", flatten( tensors ), { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
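// stack, stft, stride, type_as, view_as and where below appear to have no
// derivative formula of their own: they forward to the composite Type::
// implementation, and autograd bookkeeping happens inside the primitive ops
// that implementation calls. These wrappers mostly just add profiling and
// trace recording on top.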
Tensor VariableType::stft(const Tensor & self, int64_t frame_length, int64_t hop, int64_t fft_size, bool return_onesided, const Tensor & window, int64_t pad_end) const { | |
profiler::RecordFunction profiler("stft"); | |
auto result = Type::stft(self, frame_length, hop, fft_size, return_onesided, window, pad_end); | |
if (jit::tracer::isTracing( self, window )) { | |
jit::Node *n = jit::tracer::recordTrace( "stft", { self, window }, { result } ); | |
setattr(n, jit::stringToSymbol("frame_length"), frame_length); | |
setattr(n, jit::stringToSymbol("hop"), hop); | |
setattr(n, jit::stringToSymbol("fft_size"), fft_size); | |
setattr(n, jit::stringToSymbol("return_onesided"), return_onesided); | |
setattr(n, jit::stringToSymbol("pad_end"), pad_end); | |
} | |
return result; | |
} | |
int64_t VariableType::stride(const Tensor & self, int64_t dim) const { | |
auto result = Type::stride(self, dim); | |
return result; | |
} | |
Tensor VariableType::type_as(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("type_as"); | |
auto result = Type::type_as(self, other); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "type_as", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::unsqueeze(const Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("unsqueeze"); | |
auto& self_ = unpack(self, "self", 0); | |
std::shared_ptr<UnsqueezeBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UnsqueezeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
} | |
auto result = as_view(self, baseType->unsqueeze(self_, dim)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "unsqueeze", { self }, { result } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return result; | |
} | |
Tensor & VariableType::unsqueeze_(Tensor & self, int64_t dim) const { | |
profiler::RecordFunction profiler("unsqueeze_"); | |
auto& self_ = unpack(self, "self", 0); | |
check_inplace(self); | |
std::shared_ptr<UnsqueezeBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<UnsqueezeBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
grad_fn->dim = dim; | |
} | |
baseType->unsqueeze_(self_, dim); | |
ensure_no_aten_scalars(self); | |
increment_version(self); | |
set_history(self, grad_fn); | |
if (jit::tracer::isTracing( self )) { | |
jit::Node *n = jit::tracer::recordTrace( "unsqueeze", { self }, { self } ); | |
setattr(n, jit::stringToSymbol("dim"), dim); | |
} | |
return self; | |
} | |
Tensor VariableType::view_as(const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("view_as"); | |
auto result = Type::view_as(self, other); | |
if (jit::tracer::isTracing( self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "view_as", { self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::where(const Tensor & condition, const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("where"); | |
auto result = Type::where(condition, self, other); | |
if (jit::tracer::isTracing( condition, self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "where", { condition, self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
Tensor VariableType::_s_where(const Tensor & condition, const Tensor & self, const Tensor & other) const { | |
profiler::RecordFunction profiler("_s_where"); | |
auto& condition_ = unpack_byte(condition, "condition", 0); | |
auto& self_ = unpack(self, "self", 1); | |
auto& other_ = unpack(other, "other", 2); | |
std::shared_ptr<SWhereBackward> grad_fn; | |
if (compute_requires_grad({ condition, self, other })) { | |
grad_fn = std::make_shared<SWhereBackward>(); | |
grad_fn->next_functions = compute_next_functions({ condition, self, other }); | |
grad_fn->condition_info = condition; | |
grad_fn->condition_ = SavedVariable(condition, false); | |
} | |
auto result = as_variable(baseType->_s_where(condition_, self_, other_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( condition, self, other )) { | |
jit::Node *n = jit::tracer::recordTrace( "_s_where", { condition, self, other }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
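// SWhereBackward saves the condition mask (which itself never receives a
// gradient, being a byte tensor): as a sketch, the incoming grad is routed to
// `self` where the condition is true and to `other` where it is false.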
Tensor VariableType::_standard_gamma_grad(const Tensor & self, const Tensor & output) const { | |
profiler::RecordFunction profiler("_standard_gamma_grad"); | |
auto& self_ = unpack(self, "self", 0); | |
auto& output_ = unpack(output, "output", 1); | |
std::shared_ptr<StandardGammaGradBackward> grad_fn; | |
if (compute_requires_grad({ self })) { | |
grad_fn = std::make_shared<StandardGammaGradBackward>(); | |
grad_fn->next_functions = compute_next_functions({ self }); | |
} | |
auto result = as_variable(baseType->_standard_gamma_grad(self_, output_)); | |
set_history(result, grad_fn); | |
if (jit::tracer::isTracing( self, output )) { | |
jit::Node *n = jit::tracer::recordTrace( "_standard_gamma_grad", { self, output }, { result } ); | |
(void)n; | |
} | |
return result; | |
} | |
}} // namespace torch::autograd |