InplaceOrViewTypeEverything.cpp
#include "torch/csrc/autograd/VariableTypeUtils.h" | |
#include <torch/library.h> | |
#include "torch/csrc/autograd/function.h" | |
#include <ATen/RedispatchFunctions.h> | |
#include "ATen/quantized/Quantizer.h" | |
// @generated from tools/autograd/templates/InplaceOrViewType.cpp | |
using namespace at; | |
namespace torch { | |
namespace InplaceOrView { | |
namespace { | |
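// The generated kernels below all follow the same shape:
//  - AutoDispatchBelowInplaceOrView keeps the nested redispatch from re-entering this layer.
//  - The incoming keyset is masked with FULL_AFTER InplaceOrView, so the call falls through
//    to the next kernel in dispatch order (typically the backend implementation).
//  - In-place and out= variants then call torch::autograd::increment_version() on the
//    mutated tensor, bumping its version counter so autograd can detect that a value saved
//    for backward has since been overwritten.
//  - View-returning ops skip the version bump and instead wrap the result with
//    torch::autograd::as_view() (see the view kernels further down).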
Tensor & __ilshift___Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::__ilshift__(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & __ilshift___Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::__ilshift__(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & __irshift___Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::__irshift__(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & __irshift___Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::__irshift__(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & _add_relu__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_add_relu_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & _add_relu_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Scalar alpha, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_add_relu_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & _addmv_impl_(c10::DispatchKeySet ks, Tensor & self, const Tensor & self2, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_addmv_impl_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, self2, mat, vec, beta, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
void _amp_foreach_non_finite_check_and_unscale_(c10::DispatchKeySet ks, TensorList self, Tensor & found_inf, const Tensor & inv_scale) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_amp_foreach_non_finite_check_and_unscale_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, found_inf, inv_scale); | |
} | |
} | |
Tensor & _bmm_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & mat2, bool deterministic, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_bmm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mat2, deterministic, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & _cat_out_out(c10::DispatchKeySet ks, TensorList tensors, int64_t dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_cat_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), tensors, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & _coalesced_(c10::DispatchKeySet ks, Tensor & self, bool coalesced) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_coalesced_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, coalesced); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & _compute_linear_combination_out_out(c10::DispatchKeySet ks, const Tensor & input, const Tensor & coefficients, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_compute_linear_combination_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), input, coefficients, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & _cumprod_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_cumprod_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & _cumsum_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_cumsum_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & _fft_c2c_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, int64_t normalization, bool forward, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_fft_c2c_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, normalization, forward, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & _fft_c2r_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, int64_t normalization, int64_t last_dim_size, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_fft_c2r_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, normalization, last_dim_size, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & _fft_r2c_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, int64_t normalization, bool onesided, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_fft_r2c_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, normalization, onesided, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
void _foreach_abs_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_abs_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_acos_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_acos_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_add__Scalar(c10::DispatchKeySet ks, TensorList self, Scalar scalar) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_add_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, scalar); | |
} | |
} | |
void _foreach_add__List(c10::DispatchKeySet ks, TensorList self, TensorList other, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_add_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha); | |
} | |
} | |
void _foreach_add__ScalarList(c10::DispatchKeySet ks, TensorList self, ArrayRef<Scalar> scalars) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_add_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, scalars); | |
} | |
} | |
void _foreach_addcdiv__Scalar(c10::DispatchKeySet ks, TensorList self, TensorList tensor1, TensorList tensor2, Scalar value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_addcdiv_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, tensor1, tensor2, value); | |
} | |
} | |
void _foreach_addcdiv__ScalarList(c10::DispatchKeySet ks, TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef<Scalar> scalars) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_addcdiv_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, tensor1, tensor2, scalars); | |
} | |
} | |
void _foreach_addcmul__Scalar(c10::DispatchKeySet ks, TensorList self, TensorList tensor1, TensorList tensor2, Scalar value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_addcmul_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, tensor1, tensor2, value); | |
} | |
} | |
void _foreach_addcmul__ScalarList(c10::DispatchKeySet ks, TensorList self, TensorList tensor1, TensorList tensor2, ArrayRef<Scalar> scalars) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_addcmul_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, tensor1, tensor2, scalars); | |
} | |
} | |
void _foreach_asin_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_asin_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_atan_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_atan_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_ceil_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_ceil_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_cos_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_cos_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_cosh_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_cosh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_div__Scalar(c10::DispatchKeySet ks, TensorList self, Scalar scalar) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_div_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, scalar); | |
} | |
} | |
void _foreach_div__List(c10::DispatchKeySet ks, TensorList self, TensorList other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_div_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
} | |
void _foreach_div__ScalarList(c10::DispatchKeySet ks, TensorList self, ArrayRef<Scalar> scalars) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_div_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, scalars); | |
} | |
} | |
void _foreach_erf_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_erf_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_erfc_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_erfc_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_exp_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_exp_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_expm1_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_expm1_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_floor_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_floor_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_frac_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_frac_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_lgamma_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_lgamma_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_log10_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_log10_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_log1p_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_log1p_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_log2_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_log2_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_log_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_log_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_mul__Scalar(c10::DispatchKeySet ks, TensorList self, Scalar scalar) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_mul_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, scalar); | |
} | |
} | |
void _foreach_mul__List(c10::DispatchKeySet ks, TensorList self, TensorList other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_mul_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
} | |
void _foreach_mul__ScalarList(c10::DispatchKeySet ks, TensorList self, ArrayRef<Scalar> scalars) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_mul_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, scalars); | |
} | |
} | |
void _foreach_neg_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_neg_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_reciprocal_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_reciprocal_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_round_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_round_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_sigmoid_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_sigmoid_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_sin_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_sin_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_sinh_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_sinh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_sqrt_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_sqrt_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_sub__Scalar(c10::DispatchKeySet ks, TensorList self, Scalar scalar) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_sub_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, scalar); | |
} | |
} | |
void _foreach_sub__List(c10::DispatchKeySet ks, TensorList self, TensorList other, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_sub_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha); | |
} | |
} | |
void _foreach_sub__ScalarList(c10::DispatchKeySet ks, TensorList self, ArrayRef<Scalar> scalars) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_sub_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, scalars); | |
} | |
} | |
void _foreach_tan_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_tan_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_tanh_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_tanh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_trunc_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_trunc_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
void _foreach_zero_(c10::DispatchKeySet ks, TensorList self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_foreach_zero_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
} | |
Tensor & _index_copy_(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_index_copy_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, source); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & _index_put_impl_(c10::DispatchKeySet ks, Tensor & self, const c10::List<c10::optional<Tensor>> & indices, const Tensor & values, bool accumulate, bool unsafe) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_index_put_impl_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, indices, values, accumulate, unsafe); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
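// View-returning ops do not bump a version counter; the raw result of the redispatch is
// handed to torch::autograd::as_view(), which records it as a view of `self` so autograd
// can keep base and view in sync. For _indices/_values the view is marked as
// non-differentiable in both backward and forward mode.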
Tensor _indices(c10::DispatchKeySet ks, const Tensor & self) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::_indices(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
})(); | |
auto result = torch::autograd::as_view(self, tmp, /* is_bw_differentiable */ false, /* is_fw_differentiable */ false); | |
return result; | |
} | |
Tensor & _linalg_inv_out_helper_(c10::DispatchKeySet ks, Tensor & self, Tensor & infos_lu, Tensor & infos_getri) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_linalg_inv_out_helper_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, infos_lu, infos_getri); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & _linalg_solve_out_helper_(c10::DispatchKeySet ks, Tensor & self, Tensor & other, Tensor & infos) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_linalg_solve_out_helper_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, infos); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & _logcumsumexp_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_logcumsumexp_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & _mkldnn_transpose_(c10::DispatchKeySet ks, Tensor & self, int64_t dim0, int64_t dim1) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_mkldnn_transpose_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim0, dim1); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
std::tuple<Tensor &,Tensor &> _mode_out_values(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_mode_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
Tensor & _stack_out_out(c10::DispatchKeySet ks, TensorList tensors, int64_t dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::_stack_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), tensors, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor _values(c10::DispatchKeySet ks, const Tensor & self) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::_values(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
})(); | |
auto result = torch::autograd::as_view(self, tmp, /* is_bw_differentiable */ false, /* is_fw_differentiable */ false); | |
return result; | |
} | |
Tensor & abs_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::abs_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & abs_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::abs_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & acos_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::acos_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & acos_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::acos_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & acosh_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::acosh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & acosh_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::acosh_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & adaptive_avg_pool2d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::adaptive_avg_pool2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & adaptive_avg_pool3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::adaptive_avg_pool3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & adaptive_avg_pool3d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::adaptive_avg_pool3d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & adaptive_max_pool2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & indices, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::adaptive_max_pool2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, indices, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> adaptive_max_pool2d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, Tensor & out, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::adaptive_max_pool2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, out, indices); | |
} | |
torch::autograd::increment_version(out); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(out, indices); | |
} | |
Tensor & adaptive_max_pool3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & indices, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::adaptive_max_pool3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, indices, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> adaptive_max_pool3d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, Tensor & out, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::adaptive_max_pool3d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, out, indices); | |
} | |
torch::autograd::increment_version(out); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(out, indices); | |
} | |
Tensor & add__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::add_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & add__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::add_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & add_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Scalar alpha, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::add_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & addbmm_(c10::DispatchKeySet ks, Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addbmm_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, batch1, batch2, beta, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & addbmm_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addbmm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, batch1, batch2, beta, alpha, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & addcdiv_(c10::DispatchKeySet ks, Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addcdiv_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, tensor1, tensor2, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & addcdiv_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addcdiv_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, tensor1, tensor2, value, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & addcmul_(c10::DispatchKeySet ks, Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addcmul_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, tensor1, tensor2, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & addcmul_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addcmul_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, tensor1, tensor2, value, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & addmm_(c10::DispatchKeySet ks, Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addmm_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mat1, mat2, beta, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & addmm_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addmm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mat1, mat2, beta, alpha, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & addmv_(c10::DispatchKeySet ks, Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addmv_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mat, vec, beta, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & addmv_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addmv_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mat, vec, beta, alpha, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & addr_(c10::DispatchKeySet ks, Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addr_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, vec1, vec2, beta, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & addr_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::addr_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, vec1, vec2, beta, alpha, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
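// For differentiable views such as alias and as_strided, as_view() additionally receives a
// view_func lambda (set only when the base cannot simply be re-viewed via as_strided) that
// lets autograd replay the view from an updated base, plus a CreationMeta flag recording
// whether the view was created under no-grad mode.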
Tensor alias(c10::DispatchKeySet ks, const Tensor & self) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::alias(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::alias(input_base); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor & all_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::all_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & amax_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::amax_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & amin_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::amin_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & angle_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::angle_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & any_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::any_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & arange_out_start_out(c10::DispatchKeySet ks, Scalar start, Scalar end, Scalar step, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::arange_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), start, end, step, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & argmax_out_out(c10::DispatchKeySet ks, const Tensor & self, c10::optional<int64_t> dim, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::argmax_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & argmin_out_out(c10::DispatchKeySet ks, const Tensor & self, c10::optional<int64_t> dim, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::argmin_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor as_strided(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::as_strided(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, size, stride, storage_offset); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
auto size_vec = size.vec(); | |
auto stride_vec = stride.vec(); | |
auto storage_offset_val = storage_offset.value_or(0); | |
func = [=](const at::Tensor& input_base) { | |
return at::as_strided(input_base, size_vec, stride_vec, storage_offset_val); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor & as_strided_(c10::DispatchKeySet ks, Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::as_strided_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, size, stride, storage_offset); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & asin_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::asin_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & asin_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::asin_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & asinh_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::asinh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & asinh_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::asinh_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & atan2_(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::atan2_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & atan2_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::atan2_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & atan_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::atan_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & atan_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::atan_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & atanh_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::atanh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & atanh_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::atanh_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & avg_pool2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::avg_pool2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & avg_pool2d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::avg_pool2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & avg_pool3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::avg_pool3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & avg_pool3d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::avg_pool3d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & baddbmm_(c10::DispatchKeySet ks, Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::baddbmm_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, batch1, batch2, beta, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & baddbmm_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::baddbmm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, batch1, batch2, beta, alpha, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & batch_norm_elemt_out_out(c10::DispatchKeySet ks, const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const Tensor & mean, const Tensor & invstd, double eps, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::batch_norm_elemt_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), input, weight, bias, mean, invstd, eps, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bernoulli__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & p, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bernoulli_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, p, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & bernoulli__float(c10::DispatchKeySet ks, Tensor & self, double p, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bernoulli_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, p, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & bernoulli_out_out(c10::DispatchKeySet ks, const Tensor & self, c10::optional<Generator> generator, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bernoulli_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, generator, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & binary_cross_entropy_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::binary_cross_entropy_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, weight, reduction, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & binary_cross_entropy_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::binary_cross_entropy_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, weight, reduction, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bitwise_and_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bitwise_and_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bitwise_and_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bitwise_and_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bitwise_not_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bitwise_not_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bitwise_or_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bitwise_or_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bitwise_or_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bitwise_or_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bitwise_xor_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bitwise_xor_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bitwise_xor_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bitwise_xor_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bmm_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & mat2, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bmm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mat2, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & bucketize_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & boundaries, bool out_int32, bool right, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::bucketize_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, boundaries, out_int32, right, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & cat_out_out(c10::DispatchKeySet ks, TensorList tensors, int64_t dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cat_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), tensors, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & cauchy_(c10::DispatchKeySet ks, Tensor & self, double median, double sigma, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cauchy_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, median, sigma, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & ceil_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ceil_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & ceil_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ceil_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & celu_(c10::DispatchKeySet ks, Tensor & self, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::celu_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & cholesky_inverse_out_out(c10::DispatchKeySet ks, const Tensor & self, bool upper, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cholesky_inverse_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, upper, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & cholesky_out_out(c10::DispatchKeySet ks, const Tensor & self, bool upper, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cholesky_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, upper, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & cholesky_solve_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & input2, bool upper, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cholesky_solve_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, input2, upper, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & clamp_(c10::DispatchKeySet ks, Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::clamp_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, min, max); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & clamp_max_(c10::DispatchKeySet ks, Tensor & self, Scalar max) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::clamp_max_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, max); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & clamp_max_out_out(c10::DispatchKeySet ks, const Tensor & self, Scalar max, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::clamp_max_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, max, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & clamp_min_(c10::DispatchKeySet ks, Tensor & self, Scalar min) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::clamp_min_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, min); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & clamp_min_out_out(c10::DispatchKeySet ks, const Tensor & self, Scalar min, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::clamp_min_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, min, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & clamp_out_out(c10::DispatchKeySet ks, const Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::clamp_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, min, max, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & col2im_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::col2im_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, kernel_size, dilation, padding, stride, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & col2im_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::col2im_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, kernel_size, dilation, padding, stride, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & complex_out_out(c10::DispatchKeySet ks, const Tensor & real, const Tensor & imag, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::complex_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), real, imag, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & conj_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::conj_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &,Tensor &> conv_depthwise3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::conv_depthwise3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, weight, kernel_size, stride, padding, dilation, grad_input, grad_weight, grad_bias); | |
} | |
torch::autograd::increment_version(grad_input); | |
torch::autograd::increment_version(grad_weight); | |
torch::autograd::increment_version(grad_bias); | |
return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
} | |
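// Multi-output out= kernels like the one above follow the same recipe as the
// single-output ones, just widened: each user-provided output tensor gets its own
// version bump, and the results are handed back as a tuple of references via
// std::forward_as_tuple instead of a single Tensor&.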
Tensor & copy_sparse_to_sparse_(c10::DispatchKeySet ks, Tensor & self, const Tensor & src, bool non_blocking) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::copy_sparse_to_sparse_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, src, non_blocking); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & copysign__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::copysign_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & copysign__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::copysign_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & copysign_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::copysign_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & cos_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cos_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
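// A minimal illustration (hypothetical caller code, not part of this generated file)
// of what the increment_version calls buy us; the variable names are invented:
//
//   auto x = torch::randn({3}, torch::requires_grad(true));
//   auto y = x.sigmoid();   // sigmoid saves its output for the backward pass
//   y.cos_();               // dispatches through cos_ above and bumps y's version
//   y.sum().backward();     // autograd compares the saved version with the current one
//                           // and raises a "modified by an inplace operation" error
//                           // instead of silently computing wrong gradients.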
Tensor & cos_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cos_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & cosh_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cosh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & cosh_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cosh_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & cross_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, c10::optional<int64_t> dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cross_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> cummax_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cummax_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
std::tuple<Tensor &,Tensor &> cummin_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cummin_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
Tensor & cumprod_(c10::DispatchKeySet ks, Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cumprod_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, dtype); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & cumprod_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cumprod_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, dtype, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & cumsum_(c10::DispatchKeySet ks, Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cumsum_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, dtype); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & cumsum_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::cumsum_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, dtype, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & deg2rad_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::deg2rad_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & deg2rad_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::deg2rad_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & diag_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t diagonal, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::diag_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, diagonal, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor diagonal(c10::DispatchKeySet ks, const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::diagonal(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, offset, dim1, dim2); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::diagonal(input_base, offset, dim1, dim2); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
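// diagonal, like the other view ops in this file, uses a different template from the
// in-place/out= kernels (again, a rough reading of the generated code rather than
// hand-written documentation): the base kernel is redispatched below InplaceOrView,
// and the returned tensor is then registered as a view of `self` via
// torch::autograd::as_view. If the backend cannot rebuild the view with as_strided
// (the !support_as_strided() branch), a replay lambda capturing the view arguments is
// stored as view_func so autograd can re-create the view from an updated base. The
// CreationMeta argument records whether grad mode was enabled at creation time, which
// determines what error or warning is raised if the view is later mutated in place.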
Tensor & digamma_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::digamma_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & digamma_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::digamma_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & div__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::div_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & div__Tensor_mode(c10::DispatchKeySet ks, Tensor & self, const Tensor & other, std::string rounding_mode) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::div_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, rounding_mode); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & div__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::div_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & div__Scalar_mode(c10::DispatchKeySet ks, Tensor & self, Scalar other, std::string rounding_mode) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::div_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, rounding_mode); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & div_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::div_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & div_out_out_mode(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, std::string rounding_mode, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::div_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, rounding_mode, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & dot_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & tensor, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::dot_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, tensor, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> eig_out_e(c10::DispatchKeySet ks, const Tensor & self, bool eigenvectors, Tensor & e, Tensor & v) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::eig_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, eigenvectors, e, v); | |
} | |
torch::autograd::increment_version(e); | |
torch::autograd::increment_version(v); | |
return std::forward_as_tuple(e, v); | |
} | |
Tensor & elu_(c10::DispatchKeySet ks, Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::elu_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, alpha, scale, input_scale); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & elu_out_out(c10::DispatchKeySet ks, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::elu_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, alpha, scale, input_scale, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & embedding_renorm_(c10::DispatchKeySet ks, Tensor & self, const Tensor & indices, double max_norm, double norm_type) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::embedding_renorm_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, indices, max_norm, norm_type); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & eq__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::eq_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & eq__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::eq_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & eq_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::eq_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & eq_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::eq_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & erf_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::erf_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & erf_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::erf_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & erfc_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::erfc_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & erfc_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::erfc_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & erfinv_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::erfinv_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & erfinv_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::erfinv_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & exp2_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::exp2_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & exp2_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::exp2_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & exp_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::exp_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & exp_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::exp_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor expand(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef size, bool implicit) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::expand(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, size, implicit); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
auto size_vec = size.vec(); | |
func = [=](const at::Tensor& input_base) { | |
return input_base.expand(size_vec, implicit); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
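// One detail worth noting in expand (and in other view ops taking IntArrayRef
// arguments): the sizes are copied into an owning std::vector via size.vec() before
// being captured by the replay lambda. IntArrayRef is a non-owning view into the
// caller's storage, so capturing it directly could leave the lambda holding a dangling
// reference by the time the view needs to be replayed.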
Tensor & expm1_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::expm1_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & expm1_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::expm1_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & exponential_(c10::DispatchKeySet ks, Tensor & self, double lambd, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::exponential_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, lambd, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & eye_out_out(c10::DispatchKeySet ks, int64_t n, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::eye_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), n, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & eye_out_m_out(c10::DispatchKeySet ks, int64_t n, int64_t m, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::eye_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), n, m, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & fill__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fill_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & fill__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fill_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & floor_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::floor_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & floor_divide__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::floor_divide_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & floor_divide_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::floor_divide_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & floor_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::floor_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & fmax_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fmax_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & fmin_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fmin_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & fmod__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fmod_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & fmod__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fmod_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & fmod_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fmod_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & fmod_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fmod_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & frac_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::frac_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & frac_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::frac_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & fractional_max_pool2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fractional_max_pool2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, kernel_size, output_size, indices, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out_output(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples, Tensor & output, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fractional_max_pool2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, kernel_size, output_size, random_samples, output, indices); | |
} | |
torch::autograd::increment_version(output); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(output, indices); | |
} | |
Tensor & fractional_max_pool3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fractional_max_pool3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, kernel_size, output_size, indices, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> fractional_max_pool3d_out_output(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples, Tensor & output, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::fractional_max_pool3d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, kernel_size, output_size, random_samples, output, indices); | |
} | |
torch::autograd::increment_version(output); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(output, indices); | |
} | |
Tensor & gather_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::gather_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, sparse_grad, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & gcd_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::gcd_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & ge__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ge_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & ge__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ge_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & ge_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ge_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & ge_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ge_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & geometric_(c10::DispatchKeySet ks, Tensor & self, double p, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::geometric_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, p, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
std::tuple<Tensor &,Tensor &> geqrf_out_a(c10::DispatchKeySet ks, const Tensor & self, Tensor & a, Tensor & tau) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::geqrf_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, a, tau); | |
} | |
torch::autograd::increment_version(a); | |
torch::autograd::increment_version(tau); | |
return std::forward_as_tuple(a, tau); | |
} | |
Tensor & ger_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & vec2, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ger_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, vec2, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & glu_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, int64_t dim, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::glu_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, dim, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & glu_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::glu_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & gt__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::gt_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & gt__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::gt_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & gt_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::gt_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & gt_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::gt_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & hardsigmoid_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hardsigmoid_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & hardsigmoid_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hardsigmoid_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & hardswish_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hardswish_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & hardswish_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hardswish_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & hardtanh_(c10::DispatchKeySet ks, Tensor & self, Scalar min_val, Scalar max_val) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hardtanh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, min_val, max_val); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & hardtanh_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hardtanh_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, min_val, max_val, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & hardtanh_out_out(c10::DispatchKeySet ks, const Tensor & self, Scalar min_val, Scalar max_val, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hardtanh_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, min_val, max_val, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & heaviside_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & values, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::heaviside_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, values, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & histc_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t bins, Scalar min, Scalar max, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::histc_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, bins, min, max, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & hspmm_out_out(c10::DispatchKeySet ks, const Tensor & mat1, const Tensor & mat2, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hspmm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), mat1, mat2, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & huber_loss_backward_out_out(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double delta, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::huber_loss_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, reduction, delta, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & huber_loss_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, int64_t reduction, double delta, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::huber_loss_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, reduction, delta, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & hypot_(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hypot_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & hypot_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::hypot_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & i0_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::i0_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & i0_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::i0_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & igamma_(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::igamma_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & igamma_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::igamma_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & igammac_(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::igammac_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & igammac_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::igammac_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & im2col_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::im2col_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, input_size, kernel_size, dilation, padding, stride, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & im2col_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::im2col_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, kernel_size, dilation, padding, stride, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & index_add_(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::index_add_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, source); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & index_copy_(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::index_copy_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, source); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & index_fill__int_Scalar(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, Scalar value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::index_fill_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & index_fill__int_Tensor(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, const Tensor & value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::index_fill_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & index_put_(c10::DispatchKeySet ks, Tensor & self, const c10::List<c10::optional<Tensor>> & indices, const Tensor & values, bool accumulate) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::index_put_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, indices, values, accumulate); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & index_select_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, const Tensor & index, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::index_select_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor indices(c10::DispatchKeySet ks, const Tensor & self) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::indices(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
})(); | |
auto result = torch::autograd::as_view(self, tmp, /* is_bw_differentiable */ false, /* is_fw_differentiable */ false); | |
return result; | |
} | |
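// indices (the sparse-tensor accessor) is still registered as a view of `self`, but
// with both is_bw_differentiable and is_fw_differentiable set to false, so no replay
// view_func or CreationMeta is needed; the shorter as_view overload above reflects
// that.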
Tensor & inverse_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::inverse_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & isneginf_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::isneginf_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & isposinf_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::isposinf_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> kthvalue_out_values(c10::DispatchKeySet ks, const Tensor & self, int64_t k, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::kthvalue_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, k, dim, keepdim, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
Tensor & l1_loss_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::l1_loss_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, reduction, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & l1_loss_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::l1_loss_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, reduction, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & lcm_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lcm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & le__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::le_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & le__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::le_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & le_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::le_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & le_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::le_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & leaky_relu_(c10::DispatchKeySet ks, Tensor & self, Scalar negative_slope) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::leaky_relu_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, negative_slope); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & leaky_relu_out_out(c10::DispatchKeySet ks, const Tensor & self, Scalar negative_slope, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::leaky_relu_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, negative_slope, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & lerp__Scalar(c10::DispatchKeySet ks, Tensor & self, const Tensor & end, Scalar weight) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lerp_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, end, weight); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & lerp__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & end, const Tensor & weight) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lerp_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, end, weight); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & lerp_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & end, Scalar weight, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lerp_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, end, weight, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & lerp_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & end, const Tensor & weight, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lerp_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, end, weight, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & lgamma_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lgamma_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & lgamma_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lgamma_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & linalg_cholesky_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::linalg_cholesky_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> linalg_eigh_out_eigvals(c10::DispatchKeySet ks, const Tensor & self, std::string UPLO, Tensor & eigvals, Tensor & eigvecs) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::linalg_eigh_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, UPLO, eigvals, eigvecs); | |
} | |
torch::autograd::increment_version(eigvals); | |
torch::autograd::increment_version(eigvecs); | |
return std::forward_as_tuple(eigvals, eigvecs); | |
} | |
Tensor & linalg_eigvalsh_out_out(c10::DispatchKeySet ks, const Tensor & self, std::string UPLO, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::linalg_eigvalsh_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, UPLO, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & linalg_inv_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::linalg_inv_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> linalg_qr_out_out(c10::DispatchKeySet ks, const Tensor & self, std::string mode, Tensor & Q, Tensor & R) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::linalg_qr_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mode, Q, R); | |
} | |
torch::autograd::increment_version(Q); | |
torch::autograd::increment_version(R); | |
return std::forward_as_tuple(Q, R); | |
} | |
std::tuple<Tensor &,Tensor &> linalg_slogdet_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & sign, Tensor & logabsdet) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::linalg_slogdet_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, sign, logabsdet); | |
} | |
torch::autograd::increment_version(sign); | |
torch::autograd::increment_version(logabsdet); | |
return std::forward_as_tuple(sign, logabsdet); | |
} | |
Tensor & linalg_solve_out_out(c10::DispatchKeySet ks, const Tensor & input, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::linalg_solve_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), input, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & linspace_out_out(c10::DispatchKeySet ks, Scalar start, Scalar end, c10::optional<int64_t> steps, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::linspace_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), start, end, steps, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & log10_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log10_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & log10_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log10_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & log1p_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log1p_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & log1p_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log1p_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & log2_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log2_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & log2_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log2_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & log_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & log_normal_(c10::DispatchKeySet ks, Tensor & self, double mean, double std, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log_normal_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mean, std, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & log_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & log_sigmoid_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & buffer, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log_sigmoid_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, buffer, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out_output(c10::DispatchKeySet ks, const Tensor & self, Tensor & output, Tensor & buffer) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::log_sigmoid_forward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output, buffer); | |
} | |
torch::autograd::increment_version(output); | |
torch::autograd::increment_version(buffer); | |
return std::forward_as_tuple(output, buffer); | |
} | |
Tensor & logaddexp2_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logaddexp2_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & logaddexp_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logaddexp_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & logcumsumexp_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logcumsumexp_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & logical_and_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logical_and_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & logical_not_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logical_not_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & logical_or_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logical_or_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & logical_xor_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logical_xor_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & logit_(c10::DispatchKeySet ks, Tensor & self, c10::optional<double> eps) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logit_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, eps); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & logit_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, c10::optional<double> eps, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logit_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, eps, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & logit_out_out(c10::DispatchKeySet ks, const Tensor & self, c10::optional<double> eps, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logit_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, eps, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & logspace_out_out(c10::DispatchKeySet ks, Scalar start, Scalar end, c10::optional<int64_t> steps, double base, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logspace_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), start, end, steps, base, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & logsumexp_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::logsumexp_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> lstsq_out_X(c10::DispatchKeySet ks, const Tensor & self, const Tensor & A, Tensor & X, Tensor & qr) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lstsq_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, A, X, qr); | |
} | |
torch::autograd::increment_version(X); | |
torch::autograd::increment_version(qr); | |
return std::forward_as_tuple(X, qr); | |
} | |
Tensor & lt__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lt_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & lt__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lt_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & lt_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lt_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & lt_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lt_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & lu_solve_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::lu_solve_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, LU_data, LU_pivots, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & masked_fill__Scalar(c10::DispatchKeySet ks, Tensor & self, const Tensor & mask, Scalar value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::masked_fill_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mask, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & masked_fill__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & mask, const Tensor & value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::masked_fill_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mask, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & masked_scatter_(c10::DispatchKeySet ks, Tensor & self, const Tensor & mask, const Tensor & source) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::masked_scatter_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mask, source); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & masked_select_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & mask, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::masked_select_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mask, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> max_out_dim_max(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool keepdim, Tensor & max, Tensor & max_values) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::max_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, max, max_values); | |
} | |
torch::autograd::increment_version(max); | |
torch::autograd::increment_version(max_values); | |
return std::forward_as_tuple(max, max_values); | |
} | |
Tensor & max_pool2d_with_indices_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::max_pool2d_with_indices_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> max_pool2d_with_indices_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::max_pool2d_with_indices_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); | |
} | |
torch::autograd::increment_version(out); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(out, indices); | |
} | |
Tensor & max_pool3d_with_indices_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::max_pool3d_with_indices_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> max_pool3d_with_indices_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor & out, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::max_pool3d_with_indices_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, kernel_size, stride, padding, dilation, ceil_mode, out, indices); | |
} | |
torch::autograd::increment_version(out); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(out, indices); | |
} | |
Tensor & max_unpool2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::max_unpool2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, indices, output_size, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & max_unpool2d_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & indices, IntArrayRef output_size, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::max_unpool2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, indices, output_size, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & max_unpool3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::max_unpool3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, indices, output_size, stride, padding, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & max_unpool3d_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::max_unpool3d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, indices, output_size, stride, padding, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & maximum_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::maximum_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & mean_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mean_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, dtype, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> median_out_dim_values(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::median_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
std::tuple<Tensor &,Tensor &> min_out_dim_min(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool keepdim, Tensor & min, Tensor & min_indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::min_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, min, min_indices); | |
} | |
torch::autograd::increment_version(min); | |
torch::autograd::increment_version(min_indices); | |
return std::forward_as_tuple(min, min_indices); | |
} | |
Tensor & minimum_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::minimum_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & mm_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & mat2, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mat2, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> mode_out_values(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mode_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
Tensor & mse_loss_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mse_loss_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, reduction, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & mse_loss_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mse_loss_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, reduction, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & mul__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mul_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & mul__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mul_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & mul_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mul_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & multi_margin_loss_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const c10::optional<Tensor> & weight, int64_t reduction, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::multi_margin_loss_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, p, margin, weight, reduction, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & multi_margin_loss_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const c10::optional<Tensor> & weight, int64_t reduction, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::multi_margin_loss_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, p, margin, weight, reduction, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & multilabel_margin_loss_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::multilabel_margin_loss_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, reduction, is_target, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out_output(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & output, Tensor & is_target) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::multilabel_margin_loss_forward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, reduction, output, is_target); | |
} | |
torch::autograd::increment_version(output); | |
torch::autograd::increment_version(is_target); | |
return std::forward_as_tuple(output, is_target); | |
} | |
Tensor & multinomial_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t num_samples, bool replacement, c10::optional<Generator> generator, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::multinomial_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, num_samples, replacement, generator, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & mv_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & vec, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mv_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, vec, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & mvlgamma_(c10::DispatchKeySet ks, Tensor & self, int64_t p) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::mvlgamma_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, p); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & nan_to_num_(c10::DispatchKeySet ks, Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nan_to_num_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, nan, posinf, neginf); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & nan_to_num_out_out(c10::DispatchKeySet ks, const Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nan_to_num_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, nan, posinf, neginf, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> nanmedian_out_dim_values(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool keepdim, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nanmedian_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
Tensor & nansum_out_IntList_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nansum_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, dtype, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & narrow_copy_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, int64_t start, int64_t length, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::narrow_copy_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, start, length, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &,Tensor &> native_batch_norm_out_out(c10::DispatchKeySet ks, const Tensor & input, const c10::optional<Tensor> & weight, const c10::optional<Tensor> & bias, const c10::optional<Tensor> & running_mean, const c10::optional<Tensor> & running_var, bool training, double momentum, double eps, Tensor & out, Tensor & save_mean, Tensor & save_invstd) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::native_batch_norm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), input, weight, bias, running_mean, running_var, training, momentum, eps, out, save_mean, save_invstd); | |
} | |
torch::autograd::increment_version(out); | |
torch::autograd::increment_version(save_mean); | |
torch::autograd::increment_version(save_invstd); | |
return std::forward_as_tuple(out, save_mean, save_invstd); | |
} | |
Tensor & ne__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ne_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & ne__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ne_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & ne_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ne_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & ne_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ne_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & neg_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::neg_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & neg_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::neg_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & nextafter_(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nextafter_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & nextafter_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nextafter_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & nll_loss2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nll_loss2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out_output(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index, Tensor & output, Tensor & total_weight) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nll_loss2d_forward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, weight, reduction, ignore_index, output, total_weight); | |
} | |
torch::autograd::increment_version(output); | |
torch::autograd::increment_version(total_weight); | |
return std::forward_as_tuple(output, total_weight); | |
} | |
Tensor & nll_loss_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nll_loss_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, weight, reduction, ignore_index, total_weight, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
std::tuple<Tensor &,Tensor &> nll_loss_forward_out_output(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, const c10::optional<Tensor> & weight, int64_t reduction, int64_t ignore_index, Tensor & output, Tensor & total_weight) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nll_loss_forward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, weight, reduction, ignore_index, output, total_weight); | |
} | |
torch::autograd::increment_version(output); | |
torch::autograd::increment_version(total_weight); | |
return std::forward_as_tuple(output, total_weight); | |
} | |
Tensor & nonzero_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::nonzero_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & norm_out_dtype_out(c10::DispatchKeySet ks, const Tensor & self, c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::norm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, p, dim, keepdim, dtype, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & norm_out_out(c10::DispatchKeySet ks, const Tensor & self, c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::norm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, p, dim, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & normal_(c10::DispatchKeySet ks, Tensor & self, double mean, double std, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::normal_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mean, std, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & normal_out_Tensor_float_out(c10::DispatchKeySet ks, const Tensor & mean, double std, c10::optional<Generator> generator, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::normal_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), mean, std, generator, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & normal_out_float_Tensor_out(c10::DispatchKeySet ks, double mean, const Tensor & std, c10::optional<Generator> generator, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::normal_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), mean, std, generator, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & normal_out_Tensor_Tensor_out(c10::DispatchKeySet ks, const Tensor & mean, const Tensor & std, c10::optional<Generator> generator, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::normal_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), mean, std, generator, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & orgqr_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & input2, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::orgqr_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, input2, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & ormqr_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::ormqr_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, input2, input3, left, transpose, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor permute(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dims) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::permute(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dims); | |
})(); | |
  std::function<at::Tensor(const at::Tensor&)> func = nullptr;
  if (!self.unsafeGetTensorImpl()->support_as_strided()) {
auto dims_vec = dims.vec(); | |
func = [=](const at::Tensor& input_base) { | |
return input_base.permute(dims_vec); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
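// permute is the one view op in this stretch of the file, so it takes a
// different shape from the in-place kernels above: the redispatched result is
// wrapped with torch::autograd::as_view so the output shares the base's
// version counter and is tracked as a (bw- and fw-) differentiable view. A
// view_func lambda is only materialized when the base cannot be recovered via
// as_strided (support_as_strided() returns false), and the CreationMeta
// records whether the view was created with grad mode enabled.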
Tensor & polar_out_out(c10::DispatchKeySet ks, const Tensor & abs, const Tensor & angle, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::polar_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), abs, angle, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & polygamma_out_out(c10::DispatchKeySet ks, int64_t n, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::polygamma_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), n, self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & pow__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar exponent) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::pow_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, exponent); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & pow__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & exponent) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::pow_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, exponent); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & pow_out_Tensor_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & exponent, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::pow_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, exponent, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & pow_out_Scalar_out(c10::DispatchKeySet ks, Scalar self, const Tensor & exponent, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::pow_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, exponent, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & pow_out_Tensor_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar exponent, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::pow_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, exponent, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & prod_out_int_out(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::prod_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, dtype, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & put_(c10::DispatchKeySet ks, Tensor & self, const Tensor & index, const Tensor & source, bool accumulate) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::put_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, index, source, accumulate); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & rad2deg_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::rad2deg_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & rad2deg_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::rad2deg_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & random__from(c10::DispatchKeySet ks, Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::random_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, from, to, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & random__to(c10::DispatchKeySet ks, Tensor & self, int64_t to, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::random_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, to, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & random_(c10::DispatchKeySet ks, Tensor & self, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::random_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
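// Note: the suffixes on these wrapper names (random__from, random__to, and
// the bare random_) come from the overload names declared for the op in
// native_functions.yaml; they only disambiguate the C++ symbols registered
// here and do not change dispatch behaviour.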
Tensor & randperm_out_generator_out(c10::DispatchKeySet ks, int64_t n, c10::optional<Generator> generator, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::randperm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), n, generator, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & range_out_out(c10::DispatchKeySet ks, Scalar start, Scalar end, Scalar step, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::range_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), start, end, step, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & reciprocal_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::reciprocal_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & reciprocal_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::reciprocal_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & reflection_pad1d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::reflection_pad1d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, padding, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & reflection_pad1d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef padding, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::reflection_pad1d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, padding, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & reflection_pad2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::reflection_pad2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, padding, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & reflection_pad2d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef padding, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::reflection_pad2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, padding, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & relu_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::relu_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & remainder__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::remainder_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & remainder__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::remainder_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & remainder_out_Scalar_out(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::remainder_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & remainder_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::remainder_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & renorm_(c10::DispatchKeySet ks, Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::renorm_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, p, dim, maxnorm); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & renorm_out_out(c10::DispatchKeySet ks, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::renorm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, p, dim, maxnorm, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & replication_pad1d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::replication_pad1d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, padding, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & replication_pad1d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef padding, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::replication_pad1d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, padding, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & replication_pad2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::replication_pad2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, padding, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & replication_pad2d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef padding, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::replication_pad2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, padding, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & replication_pad3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::replication_pad3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, padding, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & replication_pad3d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef padding, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::replication_pad3d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, padding, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & round_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::round_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & round_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::round_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & rrelu_with_noise_(c10::DispatchKeySet ks, Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::rrelu_with_noise_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, noise, lower, upper, training, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & rrelu_with_noise_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, c10::optional<Generator> generator, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::rrelu_with_noise_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, noise, lower, upper, training, generator, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & rsqrt_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::rsqrt_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & rsqrt_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::rsqrt_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & scatter__src(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::scatter_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, src); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & scatter__value(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, Scalar value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::scatter_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & scatter__reduce(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, const Tensor & src, std::string reduce) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::scatter_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, src, reduce); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & scatter__value_reduce(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, Scalar value, std::string reduce) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::scatter_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, value, reduce); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & scatter_add_(c10::DispatchKeySet ks, Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::scatter_add_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index, src); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & searchsorted_out_Tensor_out(c10::DispatchKeySet ks, const Tensor & sorted_sequence, const Tensor & self, bool out_int32, bool right, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::searchsorted_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), sorted_sequence, self, out_int32, right, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
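// View-returning ops (select, slice, squeeze, t, transpose, unfold, unsqueeze, values, ...)
// compute the result below the InplaceOrView key and then register it with autograd via
// as_view(). The view_func replay lambda is only populated when the base cannot express the
// view through as_strided (the `false || !support_as_strided()` check); CreationMeta records
// whether grad mode was enabled when the view was created.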
Tensor select_int(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, int64_t index) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::select(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, index); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::select(input_base, dim, index); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
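// The set_ overloads rebind self's storage/metadata; they are handled here like any other
// in-place op (version bump only), with no view registration.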
Tensor & set__source_Storage(c10::DispatchKeySet ks, Tensor & self, Storage source) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::set_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, source); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & set__source_Storage_storage_offset(c10::DispatchKeySet ks, Tensor & self, Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::set_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, source, storage_offset, size, stride); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & set__source_Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & source) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::set_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, source); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & set_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::set_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sgn_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sgn_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sgn_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sgn_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & sigmoid_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sigmoid_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sigmoid_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & output, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sigmoid_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, output, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & sigmoid_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sigmoid_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & sign_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sign_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sign_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sign_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & signbit_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::signbit_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & silu_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::silu_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & silu_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::silu_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & sin_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sin_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sin_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sin_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & sinc_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sinc_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sinc_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sinc_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & sinh_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sinh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sinh_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sinh_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor slice_Tensor(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, c10::optional<int64_t> start, c10::optional<int64_t> end, int64_t step) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::slice(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, start, end, step); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
auto start_val = start.value_or(0); | |
auto end_val = end.value_or(0); | |
func = [=](const at::Tensor& input_base) { | |
return at::slice(input_base, dim, start_val, end_val, step); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
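// Multi-output out= wrappers bump the version counter of every preallocated output and return
// them repacked with std::forward_as_tuple, preserving the reference semantics of the out
// arguments.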
std::tuple<Tensor &,Tensor &,Tensor &> slow_conv3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::slow_conv3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, grad_input, grad_weight, grad_bias); | |
} | |
torch::autograd::increment_version(grad_input); | |
torch::autograd::increment_version(grad_weight); | |
torch::autograd::increment_version(grad_bias); | |
return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
} | |
std::tuple<Tensor &,Tensor &,Tensor &> slow_conv3d_forward_out_output(c10::DispatchKeySet ks, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, Tensor & output, Tensor & finput, Tensor & fgrad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::slow_conv3d_forward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, weight, kernel_size, bias, stride, padding, output, finput, fgrad_input); | |
} | |
torch::autograd::increment_version(output); | |
torch::autograd::increment_version(finput); | |
torch::autograd::increment_version(fgrad_input); | |
return std::forward_as_tuple(output, finput, fgrad_input); | |
} | |
std::tuple<Tensor &,Tensor &,Tensor &> slow_conv_transpose2d_backward_out_grad_output(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::slow_conv_transpose2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, grad_input, grad_weight, grad_bias); | |
} | |
torch::autograd::increment_version(grad_input); | |
torch::autograd::increment_version(grad_weight); | |
torch::autograd::increment_version(grad_bias); | |
return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
} | |
Tensor & slow_conv_transpose2d_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::slow_conv_transpose2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &,Tensor &> slow_conv_transpose3d_backward_out_grad_output(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::slow_conv_transpose3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, grad_input, grad_weight, grad_bias); | |
} | |
torch::autograd::increment_version(grad_input); | |
torch::autograd::increment_version(grad_weight); | |
torch::autograd::increment_version(grad_bias); | |
return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
} | |
Tensor & slow_conv_transpose3d_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::slow_conv_transpose3d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, weight, kernel_size, bias, stride, padding, output_padding, dilation, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & smooth_l1_loss_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, double beta, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::smooth_l1_loss_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, reduction, beta, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & smooth_l1_loss_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, int64_t reduction, double beta, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::smooth_l1_loss_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, reduction, beta, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & soft_margin_loss_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::soft_margin_loss_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, target, reduction, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & soft_margin_loss_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & target, int64_t reduction, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::soft_margin_loss_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, target, reduction, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & softplus_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::softplus_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, beta, threshold, output, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & softplus_out_out(c10::DispatchKeySet ks, const Tensor & self, Scalar beta, Scalar threshold, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::softplus_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, beta, threshold, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & softshrink_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, Scalar lambd, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::softshrink_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, lambd, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & softshrink_out_out(c10::DispatchKeySet ks, const Tensor & self, Scalar lambd, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::softshrink_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, lambd, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> solve_out_solution(c10::DispatchKeySet ks, const Tensor & self, const Tensor & A, Tensor & solution, Tensor & lu) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::solve_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, A, solution, lu); | |
} | |
torch::autograd::increment_version(solution); | |
torch::autograd::increment_version(lu); | |
return std::forward_as_tuple(solution, lu); | |
} | |
std::tuple<Tensor &,Tensor &> sort_out_values(c10::DispatchKeySet ks, const Tensor & self, int64_t dim, bool descending, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sort_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, descending, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
std::tuple<Tensor &,Tensor &> sort_out_values_stable(c10::DispatchKeySet ks, const Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sort_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, stable, dim, descending, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
Tensor & sparse_resize_(c10::DispatchKeySet ks, Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sparse_resize_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, size, sparse_dim, dense_dim); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sparse_resize_and_clear_(c10::DispatchKeySet ks, Tensor & self, IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sparse_resize_and_clear_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, size, sparse_dim, dense_dim); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & special_gammaln_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::special_gammaln_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
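// Ops returning multiple views of self (split, split_with_sizes) register the whole TensorList
// with as_view using CreationMeta::MULTI_OUTPUT_SAFE; no per-view replay lambda is attached in
// this case.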
std::vector<Tensor> split_Tensor(c10::DispatchKeySet ks, const Tensor & self, int64_t split_size, int64_t dim) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::split(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, split_size, dim); | |
})(); | |
torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* creation_meta */ torch::autograd::CreationMeta::MULTI_OUTPUT_SAFE); | |
auto result = std::move(tmp); | |
return result; | |
} | |
std::vector<Tensor> split_with_sizes(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef split_sizes, int64_t dim) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::split_with_sizes(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, split_sizes, dim); | |
})(); | |
torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* creation_meta */ torch::autograd::CreationMeta::MULTI_OUTPUT_SAFE); | |
auto result = std::move(tmp); | |
return result; | |
} | |
Tensor & sqrt_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sqrt_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sqrt_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sqrt_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor squeeze(c10::DispatchKeySet ks, const Tensor & self) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::squeeze(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::squeeze(input_base); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor squeeze_dim(c10::DispatchKeySet ks, const Tensor & self, int64_t dim) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::squeeze(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::squeeze(input_base, dim); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor & squeeze_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::squeeze_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & squeeze__dim(c10::DispatchKeySet ks, Tensor & self, int64_t dim) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::squeeze_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sspaddmm_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sspaddmm_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, mat1, mat2, beta, alpha, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & stack_out_out(c10::DispatchKeySet ks, TensorList tensors, int64_t dim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::stack_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), tensors, dim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & std_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::std_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, unbiased, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & sub__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sub_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sub__Scalar(c10::DispatchKeySet ks, Tensor & self, Scalar other, Scalar alpha) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sub_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & sub_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Scalar alpha, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sub_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, alpha, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & sum_out_IntList_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::sum_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, keepdim, dtype, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> symeig_out_e(c10::DispatchKeySet ks, const Tensor & self, bool eigenvectors, bool upper, Tensor & e, Tensor & V) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::symeig_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, eigenvectors, upper, e, V); | |
} | |
torch::autograd::increment_version(e); | |
torch::autograd::increment_version(V); | |
return std::forward_as_tuple(e, V); | |
} | |
Tensor t(c10::DispatchKeySet ks, const Tensor & self) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::t(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::t(input_base); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor & t_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::t_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & take_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & index, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::take_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, index, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & tan_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::tan_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & tan_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::tan_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & tanh_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::tanh_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & tanh_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & output, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::tanh_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, output, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & tanh_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::tanh_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & tensordot_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::tensordot_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, dims_self, dims_other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::thnn_conv2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, grad_input, grad_weight, grad_bias); | |
} | |
torch::autograd::increment_version(grad_input); | |
torch::autograd::increment_version(grad_weight); | |
torch::autograd::increment_version(grad_bias); | |
return std::forward_as_tuple(grad_input, grad_weight, grad_bias); | |
} | |
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out_output(c10::DispatchKeySet ks, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, Tensor & output, Tensor & finput, Tensor & fgrad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::thnn_conv2d_forward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, weight, kernel_size, bias, stride, padding, output, finput, fgrad_input); | |
} | |
torch::autograd::increment_version(output); | |
torch::autograd::increment_version(finput); | |
torch::autograd::increment_version(fgrad_input); | |
return std::forward_as_tuple(output, finput, fgrad_input); | |
} | |
std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & grad_input, Tensor & grad_weight) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::thnn_conv_depthwise2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, self, weight, kernel_size, stride, padding, dilation, grad_input, grad_weight); | |
} | |
torch::autograd::increment_version(grad_input); | |
torch::autograd::increment_version(grad_weight); | |
return std::forward_as_tuple(grad_input, grad_weight); | |
} | |
Tensor & thnn_conv_depthwise2d_forward_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const c10::optional<Tensor> & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::thnn_conv_depthwise2d_forward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, weight, kernel_size, bias, stride, padding, dilation, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & threshold_(c10::DispatchKeySet ks, Tensor & self, Scalar threshold, Scalar value) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::threshold_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, threshold, value); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & threshold_out_out(c10::DispatchKeySet ks, const Tensor & self, Scalar threshold, Scalar value, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::threshold_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, threshold, value, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
std::tuple<Tensor &,Tensor &> topk_out_values(c10::DispatchKeySet ks, const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted, Tensor & values, Tensor & indices) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::topk_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, k, dim, largest, sorted, values, indices); | |
} | |
torch::autograd::increment_version(values); | |
torch::autograd::increment_version(indices); | |
return std::forward_as_tuple(values, indices); | |
} | |
Tensor transpose_int(c10::DispatchKeySet ks, const Tensor & self, int64_t dim0, int64_t dim1) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::transpose(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim0, dim1); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::transpose(input_base, dim0, dim1); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor & transpose_(c10::DispatchKeySet ks, Tensor & self, int64_t dim0, int64_t dim1) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::transpose_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim0, dim1); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
std::tuple<Tensor &,Tensor &> triangular_solve_out_X(c10::DispatchKeySet ks, const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular, Tensor & X, Tensor & M) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::triangular_solve_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, A, upper, transpose, unitriangular, X, M); | |
} | |
torch::autograd::increment_version(X); | |
torch::autograd::increment_version(M); | |
return std::forward_as_tuple(X, M); | |
} | |
Tensor & tril_(c10::DispatchKeySet ks, Tensor & self, int64_t diagonal) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::tril_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, diagonal); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & tril_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t diagonal, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::tril_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, diagonal, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & triu_(c10::DispatchKeySet ks, Tensor & self, int64_t diagonal) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::triu_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, diagonal); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & triu_out_out(c10::DispatchKeySet ks, const Tensor & self, int64_t diagonal, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::triu_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, diagonal, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & trunc_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::trunc_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & trunc_out_out(c10::DispatchKeySet ks, const Tensor & self, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::trunc_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
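// unbind registers its outputs with CreationMeta::MULTI_OUTPUT_NODE (stricter than the
// MULTI_OUTPUT_SAFE used by split above): autograd treats in-place modification of these views
// as unsupported.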
std::vector<Tensor> unbind_int(c10::DispatchKeySet ks, const Tensor & self, int64_t dim) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::unbind(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim); | |
})(); | |
torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* creation_meta */ torch::autograd::CreationMeta::MULTI_OUTPUT_NODE); | |
auto result = std::move(tmp); | |
return result; | |
} | |
Tensor unfold(c10::DispatchKeySet ks, const Tensor & self, int64_t dimension, int64_t size, int64_t step) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::unfold(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dimension, size, step); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return input_base.unfold(dimension, size, step); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor & uniform_(c10::DispatchKeySet ks, Tensor & self, double from, double to, c10::optional<Generator> generator) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::uniform_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, from, to, generator); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor unsqueeze(c10::DispatchKeySet ks, const Tensor & self, int64_t dim) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::unsqueeze(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::unsqueeze(input_base, dim); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor & unsqueeze_(c10::DispatchKeySet ks, Tensor & self, int64_t dim) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::unsqueeze_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & upsample_bicubic2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_bicubic2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & upsample_bicubic2d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_bicubic2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, align_corners, scales_h, scales_w, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & upsample_bilinear2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_bilinear2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, output_size, input_size, align_corners, scales_h, scales_w, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & upsample_bilinear2d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_bilinear2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, align_corners, scales_h, scales_w, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & upsample_linear1d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_linear1d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, output_size, input_size, align_corners, scales, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & upsample_linear1d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, bool align_corners, c10::optional<double> scales, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_linear1d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, align_corners, scales, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & upsample_nearest1d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_nearest1d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, output_size, input_size, scales, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & upsample_nearest1d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, c10::optional<double> scales, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_nearest1d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, scales, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & upsample_nearest2d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_nearest2d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, output_size, input_size, scales_h, scales_w, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & upsample_nearest2d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_nearest2d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, scales_h, scales_w, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & upsample_nearest3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_nearest3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, output_size, input_size, scales_d, scales_h, scales_w, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & upsample_nearest3d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_nearest3d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, scales_d, scales_h, scales_w, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & upsample_trilinear3d_backward_out_grad_input(c10::DispatchKeySet ks, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & grad_input) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_trilinear3d_backward_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w, grad_input); | |
} | |
torch::autograd::increment_version(grad_input); | |
return grad_input; | |
} | |
Tensor & upsample_trilinear3d_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::upsample_trilinear3d_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, output_size, align_corners, scales_d, scales_h, scales_w, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor values(c10::DispatchKeySet ks, const Tensor & self) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::values(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return input_base.values(); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
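// NOTE: view-returning ops such as `values` above take a different path from the in-place
// wrappers: the redispatched result `tmp` is re-wrapped with torch::autograd::as_view so the
// output is tracked as a view of `self` for both backward and forward differentiation. The
// `view_func` lambda is captured only when the view cannot be replayed through as_strided on
// the base (the codegen bakes that decision into the `false || ...` / `true || ...`
// condition); autograd uses it to re-create the view from a fresh base when needed. The
// CreationMeta argument records whether the view was created under grad mode, which controls
// the warning/error behaviour if the view is later mutated in place.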
Tensor & var_out_out(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::var_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dim, unbiased, keepdim, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & vdot_out_out(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::vdot_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor view(c10::DispatchKeySet ks, const Tensor & self, IntArrayRef size) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::view(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, size); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (false || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
auto size_vec = size.vec(); | |
func = [=](const at::Tensor& input_base) { | |
return input_base.view(size_vec); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor view_dtype(c10::DispatchKeySet ks, const Tensor & self, ScalarType dtype) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::view(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, dtype); | |
})(); | |
auto result = torch::autograd::as_view(self, tmp, /* is_bw_differentiable */ false, /* is_fw_differentiable */ false); | |
return result; | |
} | |
Tensor view_as_complex(c10::DispatchKeySet ks, const Tensor & self) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::view_as_complex(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (true || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::view_as_complex(input_base); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
Tensor view_as_real(c10::DispatchKeySet ks, const Tensor & self) { | |
auto tmp = ([&]() { | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
return at::redispatch::view_as_real(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
})(); | |
std::function<at::Tensor(const at::Tensor&)> func=nullptr; | |
if (true || !self.unsafeGetTensorImpl()->support_as_strided()) { | |
func = [=](const at::Tensor& input_base) { | |
return at::view_as_real(input_base); | |
}; | |
} | |
auto result = torch::autograd::as_view(/* base */ self, /* output */ tmp, /* is_bw_differentiable */ true, /* is_fw_differentiable */ true, /* view_func */ func, /* creation_meta */ at::GradMode::is_enabled() ? torch::autograd::CreationMeta::DEFAULT : torch::autograd::CreationMeta::NO_GRAD_MODE); | |
return result; | |
} | |
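// NOTE: for view_as_complex / view_as_real the replay condition is hard-coded to `true`, so a
// view_func lambda is always captured: these views change the element dtype of the base, so
// they cannot be reproduced by an as_strided call on the original tensor.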
Tensor & xlogy__Tensor(c10::DispatchKeySet ks, Tensor & self, const Tensor & other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::xlogy_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & xlogy__Scalar_Other(c10::DispatchKeySet ks, Tensor & self, Scalar other) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::xlogy_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
Tensor & xlogy_out_OutTensor(c10::DispatchKeySet ks, const Tensor & self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::xlogy_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & xlogy_out_OutScalar_Self(c10::DispatchKeySet ks, Scalar self, const Tensor & other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::xlogy_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & xlogy_out_OutScalar_Other(c10::DispatchKeySet ks, const Tensor & self, Scalar other, Tensor & out) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::xlogy_outf(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self, other, out); | |
} | |
torch::autograd::increment_version(out); | |
return out; | |
} | |
Tensor & zero_(c10::DispatchKeySet ks, Tensor & self) { | |
{ | |
at::AutoDispatchBelowInplaceOrView guard(true); | |
at::redispatch::zero_(ks & c10::DispatchKeySet(c10::DispatchKeySet::FULL_AFTER, c10::DispatchKey::InplaceOrView), self); | |
} | |
torch::autograd::increment_version(self); | |
return self; | |
} | |
} // namespace | |
} // namespace InplaceOrView | |
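// The anonymous namespace below binds every wrapper defined above to its aten schema for the
// InplaceOrView dispatch key via TORCH_LIBRARY_IMPL. Purely functional ops have no entry
// here and simply fall through to the next key in the dispatch order.
// A minimal, hedged sketch of registering a custom handler through the same mechanism (the
// library name `myops` and the op are hypothetical, not part of this generated file):
//
//   // TORCH_LIBRARY_IMPL(myops, InplaceOrView, m) {
//   //   m.impl("my_fill_one_", TORCH_FN(InplaceOrView::my_fill_one_));
//   // }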
namespace { | |
TORCH_LIBRARY_IMPL(aten, InplaceOrView, m) { | |
m.impl("__ilshift__.Scalar", | |
TORCH_FN(InplaceOrView::__ilshift___Scalar) | |
); | |
m.impl("__ilshift__.Tensor", | |
TORCH_FN(InplaceOrView::__ilshift___Tensor) | |
); | |
m.impl("__irshift__.Scalar", | |
TORCH_FN(InplaceOrView::__irshift___Scalar) | |
); | |
m.impl("__irshift__.Tensor", | |
TORCH_FN(InplaceOrView::__irshift___Tensor) | |
); | |
m.impl("_add_relu_.Tensor", | |
TORCH_FN(InplaceOrView::_add_relu__Tensor) | |
); | |
m.impl("_add_relu.out", | |
TORCH_FN(InplaceOrView::_add_relu_out_out) | |
); | |
m.impl("_addmv_impl_", | |
TORCH_FN(InplaceOrView::_addmv_impl_) | |
); | |
m.impl("_amp_foreach_non_finite_check_and_unscale_", | |
TORCH_FN(InplaceOrView::_amp_foreach_non_finite_check_and_unscale_) | |
); | |
m.impl("_bmm.out", | |
TORCH_FN(InplaceOrView::_bmm_out_out) | |
); | |
m.impl("_cat.out", | |
TORCH_FN(InplaceOrView::_cat_out_out) | |
); | |
m.impl("_coalesced_", | |
TORCH_FN(InplaceOrView::_coalesced_) | |
); | |
m.impl("_compute_linear_combination.out", | |
TORCH_FN(InplaceOrView::_compute_linear_combination_out_out) | |
); | |
m.impl("_cumprod.out", | |
TORCH_FN(InplaceOrView::_cumprod_out_out) | |
); | |
m.impl("_cumsum.out", | |
TORCH_FN(InplaceOrView::_cumsum_out_out) | |
); | |
m.impl("_fft_c2c.out", | |
TORCH_FN(InplaceOrView::_fft_c2c_out_out) | |
); | |
m.impl("_fft_c2r.out", | |
TORCH_FN(InplaceOrView::_fft_c2r_out_out) | |
); | |
m.impl("_fft_r2c.out", | |
TORCH_FN(InplaceOrView::_fft_r2c_out_out) | |
); | |
m.impl("_foreach_abs_", | |
TORCH_FN(InplaceOrView::_foreach_abs_) | |
); | |
m.impl("_foreach_acos_", | |
TORCH_FN(InplaceOrView::_foreach_acos_) | |
); | |
m.impl("_foreach_add_.Scalar", | |
TORCH_FN(InplaceOrView::_foreach_add__Scalar) | |
); | |
m.impl("_foreach_add_.List", | |
TORCH_FN(InplaceOrView::_foreach_add__List) | |
); | |
m.impl("_foreach_add_.ScalarList", | |
TORCH_FN(InplaceOrView::_foreach_add__ScalarList) | |
); | |
m.impl("_foreach_addcdiv_.Scalar", | |
TORCH_FN(InplaceOrView::_foreach_addcdiv__Scalar) | |
); | |
m.impl("_foreach_addcdiv_.ScalarList", | |
TORCH_FN(InplaceOrView::_foreach_addcdiv__ScalarList) | |
); | |
m.impl("_foreach_addcmul_.Scalar", | |
TORCH_FN(InplaceOrView::_foreach_addcmul__Scalar) | |
); | |
m.impl("_foreach_addcmul_.ScalarList", | |
TORCH_FN(InplaceOrView::_foreach_addcmul__ScalarList) | |
); | |
m.impl("_foreach_asin_", | |
TORCH_FN(InplaceOrView::_foreach_asin_) | |
); | |
m.impl("_foreach_atan_", | |
TORCH_FN(InplaceOrView::_foreach_atan_) | |
); | |
m.impl("_foreach_ceil_", | |
TORCH_FN(InplaceOrView::_foreach_ceil_) | |
); | |
m.impl("_foreach_cos_", | |
TORCH_FN(InplaceOrView::_foreach_cos_) | |
); | |
m.impl("_foreach_cosh_", | |
TORCH_FN(InplaceOrView::_foreach_cosh_) | |
); | |
m.impl("_foreach_div_.Scalar", | |
TORCH_FN(InplaceOrView::_foreach_div__Scalar) | |
); | |
m.impl("_foreach_div_.List", | |
TORCH_FN(InplaceOrView::_foreach_div__List) | |
); | |
m.impl("_foreach_div_.ScalarList", | |
TORCH_FN(InplaceOrView::_foreach_div__ScalarList) | |
); | |
m.impl("_foreach_erf_", | |
TORCH_FN(InplaceOrView::_foreach_erf_) | |
); | |
m.impl("_foreach_erfc_", | |
TORCH_FN(InplaceOrView::_foreach_erfc_) | |
); | |
m.impl("_foreach_exp_", | |
TORCH_FN(InplaceOrView::_foreach_exp_) | |
); | |
m.impl("_foreach_expm1_", | |
TORCH_FN(InplaceOrView::_foreach_expm1_) | |
); | |
m.impl("_foreach_floor_", | |
TORCH_FN(InplaceOrView::_foreach_floor_) | |
); | |
m.impl("_foreach_frac_", | |
TORCH_FN(InplaceOrView::_foreach_frac_) | |
); | |
m.impl("_foreach_lgamma_", | |
TORCH_FN(InplaceOrView::_foreach_lgamma_) | |
); | |
m.impl("_foreach_log10_", | |
TORCH_FN(InplaceOrView::_foreach_log10_) | |
); | |
m.impl("_foreach_log1p_", | |
TORCH_FN(InplaceOrView::_foreach_log1p_) | |
); | |
m.impl("_foreach_log2_", | |
TORCH_FN(InplaceOrView::_foreach_log2_) | |
); | |
m.impl("_foreach_log_", | |
TORCH_FN(InplaceOrView::_foreach_log_) | |
); | |
m.impl("_foreach_mul_.Scalar", | |
TORCH_FN(InplaceOrView::_foreach_mul__Scalar) | |
); | |
m.impl("_foreach_mul_.List", | |
TORCH_FN(InplaceOrView::_foreach_mul__List) | |
); | |
m.impl("_foreach_mul_.ScalarList", | |
TORCH_FN(InplaceOrView::_foreach_mul__ScalarList) | |
); | |
m.impl("_foreach_neg_", | |
TORCH_FN(InplaceOrView::_foreach_neg_) | |
); | |
m.impl("_foreach_reciprocal_", | |
TORCH_FN(InplaceOrView::_foreach_reciprocal_) | |
); | |
m.impl("_foreach_round_", | |
TORCH_FN(InplaceOrView::_foreach_round_) | |
); | |
m.impl("_foreach_sigmoid_", | |
TORCH_FN(InplaceOrView::_foreach_sigmoid_) | |
); | |
m.impl("_foreach_sin_", | |
TORCH_FN(InplaceOrView::_foreach_sin_) | |
); | |
m.impl("_foreach_sinh_", | |
TORCH_FN(InplaceOrView::_foreach_sinh_) | |
); | |
m.impl("_foreach_sqrt_", | |
TORCH_FN(InplaceOrView::_foreach_sqrt_) | |
); | |
m.impl("_foreach_sub_.Scalar", | |
TORCH_FN(InplaceOrView::_foreach_sub__Scalar) | |
); | |
m.impl("_foreach_sub_.List", | |
TORCH_FN(InplaceOrView::_foreach_sub__List) | |
); | |
m.impl("_foreach_sub_.ScalarList", | |
TORCH_FN(InplaceOrView::_foreach_sub__ScalarList) | |
); | |
m.impl("_foreach_tan_", | |
TORCH_FN(InplaceOrView::_foreach_tan_) | |
); | |
m.impl("_foreach_tanh_", | |
TORCH_FN(InplaceOrView::_foreach_tanh_) | |
); | |
m.impl("_foreach_trunc_", | |
TORCH_FN(InplaceOrView::_foreach_trunc_) | |
); | |
m.impl("_foreach_zero_", | |
TORCH_FN(InplaceOrView::_foreach_zero_) | |
); | |
m.impl("_index_copy_", | |
TORCH_FN(InplaceOrView::_index_copy_) | |
); | |
m.impl("_index_put_impl_", | |
TORCH_FN(InplaceOrView::_index_put_impl_) | |
); | |
m.impl("_indices", | |
TORCH_FN(InplaceOrView::_indices) | |
); | |
m.impl("_linalg_inv_out_helper_", | |
TORCH_FN(InplaceOrView::_linalg_inv_out_helper_) | |
); | |
m.impl("_linalg_solve_out_helper_", | |
TORCH_FN(InplaceOrView::_linalg_solve_out_helper_) | |
); | |
m.impl("_logcumsumexp.out", | |
TORCH_FN(InplaceOrView::_logcumsumexp_out_out) | |
); | |
m.impl("_mkldnn_transpose_", | |
TORCH_FN(InplaceOrView::_mkldnn_transpose_) | |
); | |
m.impl("_mode.values", | |
TORCH_FN(InplaceOrView::_mode_out_values) | |
); | |
m.impl("_stack.out", | |
TORCH_FN(InplaceOrView::_stack_out_out) | |
); | |
m.impl("_values", | |
TORCH_FN(InplaceOrView::_values) | |
); | |
m.impl("abs_", | |
TORCH_FN(InplaceOrView::abs_) | |
); | |
m.impl("abs.out", | |
TORCH_FN(InplaceOrView::abs_out_out) | |
); | |
m.impl("acos_", | |
TORCH_FN(InplaceOrView::acos_) | |
); | |
m.impl("acos.out", | |
TORCH_FN(InplaceOrView::acos_out_out) | |
); | |
m.impl("acosh_", | |
TORCH_FN(InplaceOrView::acosh_) | |
); | |
m.impl("acosh.out", | |
TORCH_FN(InplaceOrView::acosh_out_out) | |
); | |
m.impl("adaptive_avg_pool2d.out", | |
TORCH_FN(InplaceOrView::adaptive_avg_pool2d_out_out) | |
); | |
m.impl("adaptive_avg_pool3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::adaptive_avg_pool3d_backward_out_grad_input) | |
); | |
m.impl("adaptive_avg_pool3d.out", | |
TORCH_FN(InplaceOrView::adaptive_avg_pool3d_out_out) | |
); | |
m.impl("adaptive_max_pool2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::adaptive_max_pool2d_backward_out_grad_input) | |
); | |
m.impl("adaptive_max_pool2d.out", | |
TORCH_FN(InplaceOrView::adaptive_max_pool2d_out_out) | |
); | |
m.impl("adaptive_max_pool3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::adaptive_max_pool3d_backward_out_grad_input) | |
); | |
m.impl("adaptive_max_pool3d.out", | |
TORCH_FN(InplaceOrView::adaptive_max_pool3d_out_out) | |
); | |
m.impl("add_.Tensor", | |
TORCH_FN(InplaceOrView::add__Tensor) | |
); | |
m.impl("add_.Scalar", | |
TORCH_FN(InplaceOrView::add__Scalar) | |
); | |
m.impl("add.out", | |
TORCH_FN(InplaceOrView::add_out_out) | |
); | |
m.impl("addbmm_", | |
TORCH_FN(InplaceOrView::addbmm_) | |
); | |
m.impl("addbmm.out", | |
TORCH_FN(InplaceOrView::addbmm_out_out) | |
); | |
m.impl("addcdiv_", | |
TORCH_FN(InplaceOrView::addcdiv_) | |
); | |
m.impl("addcdiv.out", | |
TORCH_FN(InplaceOrView::addcdiv_out_out) | |
); | |
m.impl("addcmul_", | |
TORCH_FN(InplaceOrView::addcmul_) | |
); | |
m.impl("addcmul.out", | |
TORCH_FN(InplaceOrView::addcmul_out_out) | |
); | |
m.impl("addmm_", | |
TORCH_FN(InplaceOrView::addmm_) | |
); | |
m.impl("addmm.out", | |
TORCH_FN(InplaceOrView::addmm_out_out) | |
); | |
m.impl("addmv_", | |
TORCH_FN(InplaceOrView::addmv_) | |
); | |
m.impl("addmv.out", | |
TORCH_FN(InplaceOrView::addmv_out_out) | |
); | |
m.impl("addr_", | |
TORCH_FN(InplaceOrView::addr_) | |
); | |
m.impl("addr.out", | |
TORCH_FN(InplaceOrView::addr_out_out) | |
); | |
m.impl("alias", | |
TORCH_FN(InplaceOrView::alias) | |
); | |
m.impl("all.out", | |
TORCH_FN(InplaceOrView::all_out_out) | |
); | |
m.impl("amax.out", | |
TORCH_FN(InplaceOrView::amax_out_out) | |
); | |
m.impl("amin.out", | |
TORCH_FN(InplaceOrView::amin_out_out) | |
); | |
m.impl("angle.out", | |
TORCH_FN(InplaceOrView::angle_out_out) | |
); | |
m.impl("any.out", | |
TORCH_FN(InplaceOrView::any_out_out) | |
); | |
m.impl("arange.start_out", | |
TORCH_FN(InplaceOrView::arange_out_start_out) | |
); | |
m.impl("argmax.out", | |
TORCH_FN(InplaceOrView::argmax_out_out) | |
); | |
m.impl("argmin.out", | |
TORCH_FN(InplaceOrView::argmin_out_out) | |
); | |
m.impl("as_strided", | |
TORCH_FN(InplaceOrView::as_strided) | |
); | |
m.impl("as_strided_", | |
TORCH_FN(InplaceOrView::as_strided_) | |
); | |
m.impl("asin_", | |
TORCH_FN(InplaceOrView::asin_) | |
); | |
m.impl("asin.out", | |
TORCH_FN(InplaceOrView::asin_out_out) | |
); | |
m.impl("asinh_", | |
TORCH_FN(InplaceOrView::asinh_) | |
); | |
m.impl("asinh.out", | |
TORCH_FN(InplaceOrView::asinh_out_out) | |
); | |
m.impl("atan2_", | |
TORCH_FN(InplaceOrView::atan2_) | |
); | |
m.impl("atan2.out", | |
TORCH_FN(InplaceOrView::atan2_out_out) | |
); | |
m.impl("atan_", | |
TORCH_FN(InplaceOrView::atan_) | |
); | |
m.impl("atan.out", | |
TORCH_FN(InplaceOrView::atan_out_out) | |
); | |
m.impl("atanh_", | |
TORCH_FN(InplaceOrView::atanh_) | |
); | |
m.impl("atanh.out", | |
TORCH_FN(InplaceOrView::atanh_out_out) | |
); | |
m.impl("avg_pool2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::avg_pool2d_backward_out_grad_input) | |
); | |
m.impl("avg_pool2d.out", | |
TORCH_FN(InplaceOrView::avg_pool2d_out_out) | |
); | |
m.impl("avg_pool3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::avg_pool3d_backward_out_grad_input) | |
); | |
m.impl("avg_pool3d.out", | |
TORCH_FN(InplaceOrView::avg_pool3d_out_out) | |
); | |
m.impl("baddbmm_", | |
TORCH_FN(InplaceOrView::baddbmm_) | |
); | |
m.impl("baddbmm.out", | |
TORCH_FN(InplaceOrView::baddbmm_out_out) | |
); | |
m.impl("batch_norm_elemt.out", | |
TORCH_FN(InplaceOrView::batch_norm_elemt_out_out) | |
); | |
m.impl("bernoulli_.Tensor", | |
TORCH_FN(InplaceOrView::bernoulli__Tensor) | |
); | |
m.impl("bernoulli_.float", | |
TORCH_FN(InplaceOrView::bernoulli__float) | |
); | |
m.impl("bernoulli.out", | |
TORCH_FN(InplaceOrView::bernoulli_out_out) | |
); | |
m.impl("binary_cross_entropy_backward.grad_input", | |
TORCH_FN(InplaceOrView::binary_cross_entropy_backward_out_grad_input) | |
); | |
m.impl("binary_cross_entropy.out", | |
TORCH_FN(InplaceOrView::binary_cross_entropy_out_out) | |
); | |
m.impl("bitwise_and.Tensor_out", | |
TORCH_FN(InplaceOrView::bitwise_and_out_Tensor_out) | |
); | |
m.impl("bitwise_and.Scalar_out", | |
TORCH_FN(InplaceOrView::bitwise_and_out_Scalar_out) | |
); | |
m.impl("bitwise_not.out", | |
TORCH_FN(InplaceOrView::bitwise_not_out_out) | |
); | |
m.impl("bitwise_or.Tensor_out", | |
TORCH_FN(InplaceOrView::bitwise_or_out_Tensor_out) | |
); | |
m.impl("bitwise_or.Scalar_out", | |
TORCH_FN(InplaceOrView::bitwise_or_out_Scalar_out) | |
); | |
m.impl("bitwise_xor.Tensor_out", | |
TORCH_FN(InplaceOrView::bitwise_xor_out_Tensor_out) | |
); | |
m.impl("bitwise_xor.Scalar_out", | |
TORCH_FN(InplaceOrView::bitwise_xor_out_Scalar_out) | |
); | |
m.impl("bmm.out", | |
TORCH_FN(InplaceOrView::bmm_out_out) | |
); | |
m.impl("bucketize.Tensor_out", | |
TORCH_FN(InplaceOrView::bucketize_out_Tensor_out) | |
); | |
m.impl("cat.out", | |
TORCH_FN(InplaceOrView::cat_out_out) | |
); | |
m.impl("cauchy_", | |
TORCH_FN(InplaceOrView::cauchy_) | |
); | |
m.impl("ceil_", | |
TORCH_FN(InplaceOrView::ceil_) | |
); | |
m.impl("ceil.out", | |
TORCH_FN(InplaceOrView::ceil_out_out) | |
); | |
m.impl("celu_", | |
TORCH_FN(InplaceOrView::celu_) | |
); | |
m.impl("cholesky_inverse.out", | |
TORCH_FN(InplaceOrView::cholesky_inverse_out_out) | |
); | |
m.impl("cholesky.out", | |
TORCH_FN(InplaceOrView::cholesky_out_out) | |
); | |
m.impl("cholesky_solve.out", | |
TORCH_FN(InplaceOrView::cholesky_solve_out_out) | |
); | |
m.impl("clamp_", | |
TORCH_FN(InplaceOrView::clamp_) | |
); | |
m.impl("clamp_max_", | |
TORCH_FN(InplaceOrView::clamp_max_) | |
); | |
m.impl("clamp_max.out", | |
TORCH_FN(InplaceOrView::clamp_max_out_out) | |
); | |
m.impl("clamp_min_", | |
TORCH_FN(InplaceOrView::clamp_min_) | |
); | |
m.impl("clamp_min.out", | |
TORCH_FN(InplaceOrView::clamp_min_out_out) | |
); | |
m.impl("clamp.out", | |
TORCH_FN(InplaceOrView::clamp_out_out) | |
); | |
m.impl("col2im_backward.grad_input", | |
TORCH_FN(InplaceOrView::col2im_backward_out_grad_input) | |
); | |
m.impl("col2im.out", | |
TORCH_FN(InplaceOrView::col2im_out_out) | |
); | |
m.impl("complex.out", | |
TORCH_FN(InplaceOrView::complex_out_out) | |
); | |
m.impl("conj.out", | |
TORCH_FN(InplaceOrView::conj_out_out) | |
); | |
m.impl("conv_depthwise3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::conv_depthwise3d_backward_out_grad_input) | |
); | |
m.impl("copy_sparse_to_sparse_", | |
TORCH_FN(InplaceOrView::copy_sparse_to_sparse_) | |
); | |
m.impl("copysign_.Tensor", | |
TORCH_FN(InplaceOrView::copysign__Tensor) | |
); | |
m.impl("copysign_.Scalar", | |
TORCH_FN(InplaceOrView::copysign__Scalar) | |
); | |
m.impl("copysign.out", | |
TORCH_FN(InplaceOrView::copysign_out_out) | |
); | |
m.impl("cos_", | |
TORCH_FN(InplaceOrView::cos_) | |
); | |
m.impl("cos.out", | |
TORCH_FN(InplaceOrView::cos_out_out) | |
); | |
m.impl("cosh_", | |
TORCH_FN(InplaceOrView::cosh_) | |
); | |
m.impl("cosh.out", | |
TORCH_FN(InplaceOrView::cosh_out_out) | |
); | |
m.impl("cross.out", | |
TORCH_FN(InplaceOrView::cross_out_out) | |
); | |
m.impl("cummax.out", | |
TORCH_FN(InplaceOrView::cummax_out_out) | |
); | |
m.impl("cummin.out", | |
TORCH_FN(InplaceOrView::cummin_out_out) | |
); | |
m.impl("cumprod_", | |
TORCH_FN(InplaceOrView::cumprod_) | |
); | |
m.impl("cumprod.out", | |
TORCH_FN(InplaceOrView::cumprod_out_out) | |
); | |
m.impl("cumsum_", | |
TORCH_FN(InplaceOrView::cumsum_) | |
); | |
m.impl("cumsum.out", | |
TORCH_FN(InplaceOrView::cumsum_out_out) | |
); | |
m.impl("deg2rad_", | |
TORCH_FN(InplaceOrView::deg2rad_) | |
); | |
m.impl("deg2rad.out", | |
TORCH_FN(InplaceOrView::deg2rad_out_out) | |
); | |
m.impl("diag.out", | |
TORCH_FN(InplaceOrView::diag_out_out) | |
); | |
m.impl("diagonal", | |
TORCH_FN(InplaceOrView::diagonal) | |
); | |
m.impl("digamma_", | |
TORCH_FN(InplaceOrView::digamma_) | |
); | |
m.impl("digamma.out", | |
TORCH_FN(InplaceOrView::digamma_out_out) | |
); | |
m.impl("div_.Tensor", | |
TORCH_FN(InplaceOrView::div__Tensor) | |
); | |
m.impl("div_.Tensor_mode", | |
TORCH_FN(InplaceOrView::div__Tensor_mode) | |
); | |
m.impl("div_.Scalar", | |
TORCH_FN(InplaceOrView::div__Scalar) | |
); | |
m.impl("div_.Scalar_mode", | |
TORCH_FN(InplaceOrView::div__Scalar_mode) | |
); | |
m.impl("div.out", | |
TORCH_FN(InplaceOrView::div_out_out) | |
); | |
m.impl("div.out_mode", | |
TORCH_FN(InplaceOrView::div_out_out_mode) | |
); | |
m.impl("dot.out", | |
TORCH_FN(InplaceOrView::dot_out_out) | |
); | |
m.impl("eig.e", | |
TORCH_FN(InplaceOrView::eig_out_e) | |
); | |
m.impl("elu_", | |
TORCH_FN(InplaceOrView::elu_) | |
); | |
m.impl("elu.out", | |
TORCH_FN(InplaceOrView::elu_out_out) | |
); | |
m.impl("embedding_renorm_", | |
TORCH_FN(InplaceOrView::embedding_renorm_) | |
); | |
m.impl("eq_.Scalar", | |
TORCH_FN(InplaceOrView::eq__Scalar) | |
); | |
m.impl("eq_.Tensor", | |
TORCH_FN(InplaceOrView::eq__Tensor) | |
); | |
m.impl("eq.Scalar_out", | |
TORCH_FN(InplaceOrView::eq_out_Scalar_out) | |
); | |
m.impl("eq.Tensor_out", | |
TORCH_FN(InplaceOrView::eq_out_Tensor_out) | |
); | |
m.impl("erf_", | |
TORCH_FN(InplaceOrView::erf_) | |
); | |
m.impl("erf.out", | |
TORCH_FN(InplaceOrView::erf_out_out) | |
); | |
m.impl("erfc_", | |
TORCH_FN(InplaceOrView::erfc_) | |
); | |
m.impl("erfc.out", | |
TORCH_FN(InplaceOrView::erfc_out_out) | |
); | |
m.impl("erfinv_", | |
TORCH_FN(InplaceOrView::erfinv_) | |
); | |
m.impl("erfinv.out", | |
TORCH_FN(InplaceOrView::erfinv_out_out) | |
); | |
m.impl("exp2_", | |
TORCH_FN(InplaceOrView::exp2_) | |
); | |
m.impl("exp2.out", | |
TORCH_FN(InplaceOrView::exp2_out_out) | |
); | |
m.impl("exp_", | |
TORCH_FN(InplaceOrView::exp_) | |
); | |
m.impl("exp.out", | |
TORCH_FN(InplaceOrView::exp_out_out) | |
); | |
m.impl("expand", | |
TORCH_FN(InplaceOrView::expand) | |
); | |
m.impl("expm1_", | |
TORCH_FN(InplaceOrView::expm1_) | |
); | |
m.impl("expm1.out", | |
TORCH_FN(InplaceOrView::expm1_out_out) | |
); | |
m.impl("exponential_", | |
TORCH_FN(InplaceOrView::exponential_) | |
); | |
m.impl("eye.out", | |
TORCH_FN(InplaceOrView::eye_out_out) | |
); | |
m.impl("eye.m_out", | |
TORCH_FN(InplaceOrView::eye_out_m_out) | |
); | |
m.impl("fill_.Scalar", | |
TORCH_FN(InplaceOrView::fill__Scalar) | |
); | |
m.impl("fill_.Tensor", | |
TORCH_FN(InplaceOrView::fill__Tensor) | |
); | |
m.impl("floor_", | |
TORCH_FN(InplaceOrView::floor_) | |
); | |
m.impl("floor_divide_.Tensor", | |
TORCH_FN(InplaceOrView::floor_divide__Tensor) | |
); | |
m.impl("floor_divide.out", | |
TORCH_FN(InplaceOrView::floor_divide_out_out) | |
); | |
m.impl("floor.out", | |
TORCH_FN(InplaceOrView::floor_out_out) | |
); | |
m.impl("fmax.out", | |
TORCH_FN(InplaceOrView::fmax_out_out) | |
); | |
m.impl("fmin.out", | |
TORCH_FN(InplaceOrView::fmin_out_out) | |
); | |
m.impl("fmod_.Scalar", | |
TORCH_FN(InplaceOrView::fmod__Scalar) | |
); | |
m.impl("fmod_.Tensor", | |
TORCH_FN(InplaceOrView::fmod__Tensor) | |
); | |
m.impl("fmod.Scalar_out", | |
TORCH_FN(InplaceOrView::fmod_out_Scalar_out) | |
); | |
m.impl("fmod.Tensor_out", | |
TORCH_FN(InplaceOrView::fmod_out_Tensor_out) | |
); | |
m.impl("frac_", | |
TORCH_FN(InplaceOrView::frac_) | |
); | |
m.impl("frac.out", | |
TORCH_FN(InplaceOrView::frac_out_out) | |
); | |
m.impl("fractional_max_pool2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::fractional_max_pool2d_backward_out_grad_input) | |
); | |
m.impl("fractional_max_pool2d.output", | |
TORCH_FN(InplaceOrView::fractional_max_pool2d_out_output) | |
); | |
m.impl("fractional_max_pool3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::fractional_max_pool3d_backward_out_grad_input) | |
); | |
m.impl("fractional_max_pool3d.output", | |
TORCH_FN(InplaceOrView::fractional_max_pool3d_out_output) | |
); | |
m.impl("gather.out", | |
TORCH_FN(InplaceOrView::gather_out_out) | |
); | |
m.impl("gcd.out", | |
TORCH_FN(InplaceOrView::gcd_out_out) | |
); | |
m.impl("ge_.Scalar", | |
TORCH_FN(InplaceOrView::ge__Scalar) | |
); | |
m.impl("ge_.Tensor", | |
TORCH_FN(InplaceOrView::ge__Tensor) | |
); | |
m.impl("ge.Scalar_out", | |
TORCH_FN(InplaceOrView::ge_out_Scalar_out) | |
); | |
m.impl("ge.Tensor_out", | |
TORCH_FN(InplaceOrView::ge_out_Tensor_out) | |
); | |
m.impl("geometric_", | |
TORCH_FN(InplaceOrView::geometric_) | |
); | |
m.impl("geqrf.a", | |
TORCH_FN(InplaceOrView::geqrf_out_a) | |
); | |
m.impl("ger.out", | |
TORCH_FN(InplaceOrView::ger_out_out) | |
); | |
m.impl("glu_backward.grad_input", | |
TORCH_FN(InplaceOrView::glu_backward_out_grad_input) | |
); | |
m.impl("glu.out", | |
TORCH_FN(InplaceOrView::glu_out_out) | |
); | |
m.impl("gt_.Scalar", | |
TORCH_FN(InplaceOrView::gt__Scalar) | |
); | |
m.impl("gt_.Tensor", | |
TORCH_FN(InplaceOrView::gt__Tensor) | |
); | |
m.impl("gt.Scalar_out", | |
TORCH_FN(InplaceOrView::gt_out_Scalar_out) | |
); | |
m.impl("gt.Tensor_out", | |
TORCH_FN(InplaceOrView::gt_out_Tensor_out) | |
); | |
m.impl("hardsigmoid_", | |
TORCH_FN(InplaceOrView::hardsigmoid_) | |
); | |
m.impl("hardsigmoid.out", | |
TORCH_FN(InplaceOrView::hardsigmoid_out_out) | |
); | |
m.impl("hardswish_", | |
TORCH_FN(InplaceOrView::hardswish_) | |
); | |
m.impl("hardswish.out", | |
TORCH_FN(InplaceOrView::hardswish_out_out) | |
); | |
m.impl("hardtanh_", | |
TORCH_FN(InplaceOrView::hardtanh_) | |
); | |
m.impl("hardtanh_backward.grad_input", | |
TORCH_FN(InplaceOrView::hardtanh_backward_out_grad_input) | |
); | |
m.impl("hardtanh.out", | |
TORCH_FN(InplaceOrView::hardtanh_out_out) | |
); | |
m.impl("heaviside.out", | |
TORCH_FN(InplaceOrView::heaviside_out_out) | |
); | |
m.impl("histc.out", | |
TORCH_FN(InplaceOrView::histc_out_out) | |
); | |
m.impl("hspmm.out", | |
TORCH_FN(InplaceOrView::hspmm_out_out) | |
); | |
m.impl("huber_loss_backward.out", | |
TORCH_FN(InplaceOrView::huber_loss_backward_out_out) | |
); | |
m.impl("huber_loss.out", | |
TORCH_FN(InplaceOrView::huber_loss_out_out) | |
); | |
m.impl("hypot_", | |
TORCH_FN(InplaceOrView::hypot_) | |
); | |
m.impl("hypot.out", | |
TORCH_FN(InplaceOrView::hypot_out_out) | |
); | |
m.impl("i0_", | |
TORCH_FN(InplaceOrView::i0_) | |
); | |
m.impl("i0.out", | |
TORCH_FN(InplaceOrView::i0_out_out) | |
); | |
m.impl("igamma_", | |
TORCH_FN(InplaceOrView::igamma_) | |
); | |
m.impl("igamma.out", | |
TORCH_FN(InplaceOrView::igamma_out_out) | |
); | |
m.impl("igammac_", | |
TORCH_FN(InplaceOrView::igammac_) | |
); | |
m.impl("igammac.out", | |
TORCH_FN(InplaceOrView::igammac_out_out) | |
); | |
m.impl("im2col_backward.grad_input", | |
TORCH_FN(InplaceOrView::im2col_backward_out_grad_input) | |
); | |
m.impl("im2col.out", | |
TORCH_FN(InplaceOrView::im2col_out_out) | |
); | |
m.impl("index_add_", | |
TORCH_FN(InplaceOrView::index_add_) | |
); | |
m.impl("index_copy_", | |
TORCH_FN(InplaceOrView::index_copy_) | |
); | |
m.impl("index_fill_.int_Scalar", | |
TORCH_FN(InplaceOrView::index_fill__int_Scalar) | |
); | |
m.impl("index_fill_.int_Tensor", | |
TORCH_FN(InplaceOrView::index_fill__int_Tensor) | |
); | |
m.impl("index_put_", | |
TORCH_FN(InplaceOrView::index_put_) | |
); | |
m.impl("index_select.out", | |
TORCH_FN(InplaceOrView::index_select_out_out) | |
); | |
m.impl("indices", | |
TORCH_FN(InplaceOrView::indices) | |
); | |
m.impl("inverse.out", | |
TORCH_FN(InplaceOrView::inverse_out_out) | |
); | |
m.impl("isneginf.out", | |
TORCH_FN(InplaceOrView::isneginf_out_out) | |
); | |
m.impl("isposinf.out", | |
TORCH_FN(InplaceOrView::isposinf_out_out) | |
); | |
m.impl("kthvalue.values", | |
TORCH_FN(InplaceOrView::kthvalue_out_values) | |
); | |
m.impl("l1_loss_backward.grad_input", | |
TORCH_FN(InplaceOrView::l1_loss_backward_out_grad_input) | |
); | |
m.impl("l1_loss.out", | |
TORCH_FN(InplaceOrView::l1_loss_out_out) | |
); | |
m.impl("lcm.out", | |
TORCH_FN(InplaceOrView::lcm_out_out) | |
); | |
m.impl("le_.Scalar", | |
TORCH_FN(InplaceOrView::le__Scalar) | |
); | |
m.impl("le_.Tensor", | |
TORCH_FN(InplaceOrView::le__Tensor) | |
); | |
m.impl("le.Scalar_out", | |
TORCH_FN(InplaceOrView::le_out_Scalar_out) | |
); | |
m.impl("le.Tensor_out", | |
TORCH_FN(InplaceOrView::le_out_Tensor_out) | |
); | |
m.impl("leaky_relu_", | |
TORCH_FN(InplaceOrView::leaky_relu_) | |
); | |
m.impl("leaky_relu.out", | |
TORCH_FN(InplaceOrView::leaky_relu_out_out) | |
); | |
m.impl("lerp_.Scalar", | |
TORCH_FN(InplaceOrView::lerp__Scalar) | |
); | |
m.impl("lerp_.Tensor", | |
TORCH_FN(InplaceOrView::lerp__Tensor) | |
); | |
m.impl("lerp.Scalar_out", | |
TORCH_FN(InplaceOrView::lerp_out_Scalar_out) | |
); | |
m.impl("lerp.Tensor_out", | |
TORCH_FN(InplaceOrView::lerp_out_Tensor_out) | |
); | |
m.impl("lgamma_", | |
TORCH_FN(InplaceOrView::lgamma_) | |
); | |
m.impl("lgamma.out", | |
TORCH_FN(InplaceOrView::lgamma_out_out) | |
); | |
m.impl("linalg_cholesky.out", | |
TORCH_FN(InplaceOrView::linalg_cholesky_out_out) | |
); | |
m.impl("linalg_eigh.eigvals", | |
TORCH_FN(InplaceOrView::linalg_eigh_out_eigvals) | |
); | |
m.impl("linalg_eigvalsh.out", | |
TORCH_FN(InplaceOrView::linalg_eigvalsh_out_out) | |
); | |
m.impl("linalg_inv.out", | |
TORCH_FN(InplaceOrView::linalg_inv_out_out) | |
); | |
m.impl("linalg_qr.out", | |
TORCH_FN(InplaceOrView::linalg_qr_out_out) | |
); | |
m.impl("linalg_slogdet.out", | |
TORCH_FN(InplaceOrView::linalg_slogdet_out_out) | |
); | |
m.impl("linalg_solve.out", | |
TORCH_FN(InplaceOrView::linalg_solve_out_out) | |
); | |
m.impl("linspace.out", | |
TORCH_FN(InplaceOrView::linspace_out_out) | |
); | |
m.impl("log10_", | |
TORCH_FN(InplaceOrView::log10_) | |
); | |
m.impl("log10.out", | |
TORCH_FN(InplaceOrView::log10_out_out) | |
); | |
m.impl("log1p_", | |
TORCH_FN(InplaceOrView::log1p_) | |
); | |
m.impl("log1p.out", | |
TORCH_FN(InplaceOrView::log1p_out_out) | |
); | |
m.impl("log2_", | |
TORCH_FN(InplaceOrView::log2_) | |
); | |
m.impl("log2.out", | |
TORCH_FN(InplaceOrView::log2_out_out) | |
); | |
m.impl("log_", | |
TORCH_FN(InplaceOrView::log_) | |
); | |
m.impl("log_normal_", | |
TORCH_FN(InplaceOrView::log_normal_) | |
); | |
m.impl("log.out", | |
TORCH_FN(InplaceOrView::log_out_out) | |
); | |
m.impl("log_sigmoid_backward.grad_input", | |
TORCH_FN(InplaceOrView::log_sigmoid_backward_out_grad_input) | |
); | |
m.impl("log_sigmoid_forward.output", | |
TORCH_FN(InplaceOrView::log_sigmoid_forward_out_output) | |
); | |
m.impl("logaddexp2.out", | |
TORCH_FN(InplaceOrView::logaddexp2_out_out) | |
); | |
m.impl("logaddexp.out", | |
TORCH_FN(InplaceOrView::logaddexp_out_out) | |
); | |
m.impl("logcumsumexp.out", | |
TORCH_FN(InplaceOrView::logcumsumexp_out_out) | |
); | |
m.impl("logical_and.out", | |
TORCH_FN(InplaceOrView::logical_and_out_out) | |
); | |
m.impl("logical_not.out", | |
TORCH_FN(InplaceOrView::logical_not_out_out) | |
); | |
m.impl("logical_or.out", | |
TORCH_FN(InplaceOrView::logical_or_out_out) | |
); | |
m.impl("logical_xor.out", | |
TORCH_FN(InplaceOrView::logical_xor_out_out) | |
); | |
m.impl("logit_", | |
TORCH_FN(InplaceOrView::logit_) | |
); | |
m.impl("logit_backward.grad_input", | |
TORCH_FN(InplaceOrView::logit_backward_out_grad_input) | |
); | |
m.impl("logit.out", | |
TORCH_FN(InplaceOrView::logit_out_out) | |
); | |
m.impl("logspace.out", | |
TORCH_FN(InplaceOrView::logspace_out_out) | |
); | |
m.impl("logsumexp.out", | |
TORCH_FN(InplaceOrView::logsumexp_out_out) | |
); | |
m.impl("lstsq.X", | |
TORCH_FN(InplaceOrView::lstsq_out_X) | |
); | |
m.impl("lt_.Scalar", | |
TORCH_FN(InplaceOrView::lt__Scalar) | |
); | |
m.impl("lt_.Tensor", | |
TORCH_FN(InplaceOrView::lt__Tensor) | |
); | |
m.impl("lt.Scalar_out", | |
TORCH_FN(InplaceOrView::lt_out_Scalar_out) | |
); | |
m.impl("lt.Tensor_out", | |
TORCH_FN(InplaceOrView::lt_out_Tensor_out) | |
); | |
m.impl("lu_solve.out", | |
TORCH_FN(InplaceOrView::lu_solve_out_out) | |
); | |
m.impl("masked_fill_.Scalar", | |
TORCH_FN(InplaceOrView::masked_fill__Scalar) | |
); | |
m.impl("masked_fill_.Tensor", | |
TORCH_FN(InplaceOrView::masked_fill__Tensor) | |
); | |
m.impl("masked_scatter_", | |
TORCH_FN(InplaceOrView::masked_scatter_) | |
); | |
m.impl("masked_select.out", | |
TORCH_FN(InplaceOrView::masked_select_out_out) | |
); | |
m.impl("max.dim_max", | |
TORCH_FN(InplaceOrView::max_out_dim_max) | |
); | |
m.impl("max_pool2d_with_indices_backward.grad_input", | |
TORCH_FN(InplaceOrView::max_pool2d_with_indices_backward_out_grad_input) | |
); | |
m.impl("max_pool2d_with_indices.out", | |
TORCH_FN(InplaceOrView::max_pool2d_with_indices_out_out) | |
); | |
m.impl("max_pool3d_with_indices_backward.grad_input", | |
TORCH_FN(InplaceOrView::max_pool3d_with_indices_backward_out_grad_input) | |
); | |
m.impl("max_pool3d_with_indices.out", | |
TORCH_FN(InplaceOrView::max_pool3d_with_indices_out_out) | |
); | |
m.impl("max_unpool2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::max_unpool2d_backward_out_grad_input) | |
); | |
m.impl("max_unpool2d.out", | |
TORCH_FN(InplaceOrView::max_unpool2d_out_out) | |
); | |
m.impl("max_unpool3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::max_unpool3d_backward_out_grad_input) | |
); | |
m.impl("max_unpool3d.out", | |
TORCH_FN(InplaceOrView::max_unpool3d_out_out) | |
); | |
m.impl("maximum.out", | |
TORCH_FN(InplaceOrView::maximum_out_out) | |
); | |
m.impl("mean.out", | |
TORCH_FN(InplaceOrView::mean_out_out) | |
); | |
m.impl("median.dim_values", | |
TORCH_FN(InplaceOrView::median_out_dim_values) | |
); | |
m.impl("min.dim_min", | |
TORCH_FN(InplaceOrView::min_out_dim_min) | |
); | |
m.impl("minimum.out", | |
TORCH_FN(InplaceOrView::minimum_out_out) | |
); | |
m.impl("mm.out", | |
TORCH_FN(InplaceOrView::mm_out_out) | |
); | |
m.impl("mode.values", | |
TORCH_FN(InplaceOrView::mode_out_values) | |
); | |
m.impl("mse_loss_backward.grad_input", | |
TORCH_FN(InplaceOrView::mse_loss_backward_out_grad_input) | |
); | |
m.impl("mse_loss.out", | |
TORCH_FN(InplaceOrView::mse_loss_out_out) | |
); | |
m.impl("mul_.Tensor", | |
TORCH_FN(InplaceOrView::mul__Tensor) | |
); | |
m.impl("mul_.Scalar", | |
TORCH_FN(InplaceOrView::mul__Scalar) | |
); | |
m.impl("mul.out", | |
TORCH_FN(InplaceOrView::mul_out_out) | |
); | |
m.impl("multi_margin_loss_backward.grad_input", | |
TORCH_FN(InplaceOrView::multi_margin_loss_backward_out_grad_input) | |
); | |
m.impl("multi_margin_loss.out", | |
TORCH_FN(InplaceOrView::multi_margin_loss_out_out) | |
); | |
m.impl("multilabel_margin_loss_backward.grad_input", | |
TORCH_FN(InplaceOrView::multilabel_margin_loss_backward_out_grad_input) | |
); | |
m.impl("multilabel_margin_loss_forward.output", | |
TORCH_FN(InplaceOrView::multilabel_margin_loss_forward_out_output) | |
); | |
m.impl("multinomial.out", | |
TORCH_FN(InplaceOrView::multinomial_out_out) | |
); | |
m.impl("mv.out", | |
TORCH_FN(InplaceOrView::mv_out_out) | |
); | |
m.impl("mvlgamma_", | |
TORCH_FN(InplaceOrView::mvlgamma_) | |
); | |
m.impl("nan_to_num_", | |
TORCH_FN(InplaceOrView::nan_to_num_) | |
); | |
m.impl("nan_to_num.out", | |
TORCH_FN(InplaceOrView::nan_to_num_out_out) | |
); | |
m.impl("nanmedian.dim_values", | |
TORCH_FN(InplaceOrView::nanmedian_out_dim_values) | |
); | |
m.impl("nansum.IntList_out", | |
TORCH_FN(InplaceOrView::nansum_out_IntList_out) | |
); | |
m.impl("narrow_copy.out", | |
TORCH_FN(InplaceOrView::narrow_copy_out_out) | |
); | |
m.impl("native_batch_norm.out", | |
TORCH_FN(InplaceOrView::native_batch_norm_out_out) | |
); | |
m.impl("ne_.Scalar", | |
TORCH_FN(InplaceOrView::ne__Scalar) | |
); | |
m.impl("ne_.Tensor", | |
TORCH_FN(InplaceOrView::ne__Tensor) | |
); | |
m.impl("ne.Scalar_out", | |
TORCH_FN(InplaceOrView::ne_out_Scalar_out) | |
); | |
m.impl("ne.Tensor_out", | |
TORCH_FN(InplaceOrView::ne_out_Tensor_out) | |
); | |
m.impl("neg_", | |
TORCH_FN(InplaceOrView::neg_) | |
); | |
m.impl("neg.out", | |
TORCH_FN(InplaceOrView::neg_out_out) | |
); | |
m.impl("nextafter_", | |
TORCH_FN(InplaceOrView::nextafter_) | |
); | |
m.impl("nextafter.out", | |
TORCH_FN(InplaceOrView::nextafter_out_out) | |
); | |
m.impl("nll_loss2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::nll_loss2d_backward_out_grad_input) | |
); | |
m.impl("nll_loss2d_forward.output", | |
TORCH_FN(InplaceOrView::nll_loss2d_forward_out_output) | |
); | |
m.impl("nll_loss_backward.grad_input", | |
TORCH_FN(InplaceOrView::nll_loss_backward_out_grad_input) | |
); | |
m.impl("nll_loss_forward.output", | |
TORCH_FN(InplaceOrView::nll_loss_forward_out_output) | |
); | |
m.impl("nonzero.out", | |
TORCH_FN(InplaceOrView::nonzero_out_out) | |
); | |
m.impl("norm.dtype_out", | |
TORCH_FN(InplaceOrView::norm_out_dtype_out) | |
); | |
m.impl("norm.out", | |
TORCH_FN(InplaceOrView::norm_out_out) | |
); | |
m.impl("normal_", | |
TORCH_FN(InplaceOrView::normal_) | |
); | |
m.impl("normal.Tensor_float_out", | |
TORCH_FN(InplaceOrView::normal_out_Tensor_float_out) | |
); | |
m.impl("normal.float_Tensor_out", | |
TORCH_FN(InplaceOrView::normal_out_float_Tensor_out) | |
); | |
m.impl("normal.Tensor_Tensor_out", | |
TORCH_FN(InplaceOrView::normal_out_Tensor_Tensor_out) | |
); | |
m.impl("orgqr.out", | |
TORCH_FN(InplaceOrView::orgqr_out_out) | |
); | |
m.impl("ormqr.out", | |
TORCH_FN(InplaceOrView::ormqr_out_out) | |
); | |
m.impl("permute", | |
TORCH_FN(InplaceOrView::permute) | |
); | |
m.impl("polar.out", | |
TORCH_FN(InplaceOrView::polar_out_out) | |
); | |
m.impl("polygamma.out", | |
TORCH_FN(InplaceOrView::polygamma_out_out) | |
); | |
m.impl("pow_.Scalar", | |
TORCH_FN(InplaceOrView::pow__Scalar) | |
); | |
m.impl("pow_.Tensor", | |
TORCH_FN(InplaceOrView::pow__Tensor) | |
); | |
m.impl("pow.Tensor_Tensor_out", | |
TORCH_FN(InplaceOrView::pow_out_Tensor_Tensor_out) | |
); | |
m.impl("pow.Scalar_out", | |
TORCH_FN(InplaceOrView::pow_out_Scalar_out) | |
); | |
m.impl("pow.Tensor_Scalar_out", | |
TORCH_FN(InplaceOrView::pow_out_Tensor_Scalar_out) | |
); | |
m.impl("prod.int_out", | |
TORCH_FN(InplaceOrView::prod_out_int_out) | |
); | |
m.impl("put_", | |
TORCH_FN(InplaceOrView::put_) | |
); | |
m.impl("rad2deg_", | |
TORCH_FN(InplaceOrView::rad2deg_) | |
); | |
m.impl("rad2deg.out", | |
TORCH_FN(InplaceOrView::rad2deg_out_out) | |
); | |
m.impl("random_.from", | |
TORCH_FN(InplaceOrView::random__from) | |
); | |
m.impl("random_.to", | |
TORCH_FN(InplaceOrView::random__to) | |
); | |
m.impl("random_", | |
TORCH_FN(InplaceOrView::random_) | |
); | |
m.impl("randperm.generator_out", | |
TORCH_FN(InplaceOrView::randperm_out_generator_out) | |
); | |
m.impl("range.out", | |
TORCH_FN(InplaceOrView::range_out_out) | |
); | |
m.impl("reciprocal_", | |
TORCH_FN(InplaceOrView::reciprocal_) | |
); | |
m.impl("reciprocal.out", | |
TORCH_FN(InplaceOrView::reciprocal_out_out) | |
); | |
m.impl("reflection_pad1d_backward.grad_input", | |
TORCH_FN(InplaceOrView::reflection_pad1d_backward_out_grad_input) | |
); | |
m.impl("reflection_pad1d.out", | |
TORCH_FN(InplaceOrView::reflection_pad1d_out_out) | |
); | |
m.impl("reflection_pad2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::reflection_pad2d_backward_out_grad_input) | |
); | |
m.impl("reflection_pad2d.out", | |
TORCH_FN(InplaceOrView::reflection_pad2d_out_out) | |
); | |
m.impl("relu_", | |
TORCH_FN(InplaceOrView::relu_) | |
); | |
m.impl("remainder_.Scalar", | |
TORCH_FN(InplaceOrView::remainder__Scalar) | |
); | |
m.impl("remainder_.Tensor", | |
TORCH_FN(InplaceOrView::remainder__Tensor) | |
); | |
m.impl("remainder.Scalar_out", | |
TORCH_FN(InplaceOrView::remainder_out_Scalar_out) | |
); | |
m.impl("remainder.Tensor_out", | |
TORCH_FN(InplaceOrView::remainder_out_Tensor_out) | |
); | |
m.impl("renorm_", | |
TORCH_FN(InplaceOrView::renorm_) | |
); | |
m.impl("renorm.out", | |
TORCH_FN(InplaceOrView::renorm_out_out) | |
); | |
m.impl("replication_pad1d_backward.grad_input", | |
TORCH_FN(InplaceOrView::replication_pad1d_backward_out_grad_input) | |
); | |
m.impl("replication_pad1d.out", | |
TORCH_FN(InplaceOrView::replication_pad1d_out_out) | |
); | |
m.impl("replication_pad2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::replication_pad2d_backward_out_grad_input) | |
); | |
m.impl("replication_pad2d.out", | |
TORCH_FN(InplaceOrView::replication_pad2d_out_out) | |
); | |
m.impl("replication_pad3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::replication_pad3d_backward_out_grad_input) | |
); | |
m.impl("replication_pad3d.out", | |
TORCH_FN(InplaceOrView::replication_pad3d_out_out) | |
); | |
m.impl("round_", | |
TORCH_FN(InplaceOrView::round_) | |
); | |
m.impl("round.out", | |
TORCH_FN(InplaceOrView::round_out_out) | |
); | |
m.impl("rrelu_with_noise_", | |
TORCH_FN(InplaceOrView::rrelu_with_noise_) | |
); | |
m.impl("rrelu_with_noise.out", | |
TORCH_FN(InplaceOrView::rrelu_with_noise_out_out) | |
); | |
m.impl("rsqrt_", | |
TORCH_FN(InplaceOrView::rsqrt_) | |
); | |
m.impl("rsqrt.out", | |
TORCH_FN(InplaceOrView::rsqrt_out_out) | |
); | |
m.impl("scatter_.src", | |
TORCH_FN(InplaceOrView::scatter__src) | |
); | |
m.impl("scatter_.value", | |
TORCH_FN(InplaceOrView::scatter__value) | |
); | |
m.impl("scatter_.reduce", | |
TORCH_FN(InplaceOrView::scatter__reduce) | |
); | |
m.impl("scatter_.value_reduce", | |
TORCH_FN(InplaceOrView::scatter__value_reduce) | |
); | |
m.impl("scatter_add_", | |
TORCH_FN(InplaceOrView::scatter_add_) | |
); | |
m.impl("searchsorted.Tensor_out", | |
TORCH_FN(InplaceOrView::searchsorted_out_Tensor_out) | |
); | |
m.impl("select.int", | |
TORCH_FN(InplaceOrView::select_int) | |
); | |
m.impl("set_.source_Storage", | |
TORCH_FN(InplaceOrView::set__source_Storage) | |
); | |
m.impl("set_.source_Storage_storage_offset", | |
TORCH_FN(InplaceOrView::set__source_Storage_storage_offset) | |
); | |
m.impl("set_.source_Tensor", | |
TORCH_FN(InplaceOrView::set__source_Tensor) | |
); | |
m.impl("set_", | |
TORCH_FN(InplaceOrView::set_) | |
); | |
m.impl("sgn_", | |
TORCH_FN(InplaceOrView::sgn_) | |
); | |
m.impl("sgn.out", | |
TORCH_FN(InplaceOrView::sgn_out_out) | |
); | |
m.impl("sigmoid_", | |
TORCH_FN(InplaceOrView::sigmoid_) | |
); | |
m.impl("sigmoid_backward.grad_input", | |
TORCH_FN(InplaceOrView::sigmoid_backward_out_grad_input) | |
); | |
m.impl("sigmoid.out", | |
TORCH_FN(InplaceOrView::sigmoid_out_out) | |
); | |
m.impl("sign_", | |
TORCH_FN(InplaceOrView::sign_) | |
); | |
m.impl("sign.out", | |
TORCH_FN(InplaceOrView::sign_out_out) | |
); | |
m.impl("signbit.out", | |
TORCH_FN(InplaceOrView::signbit_out_out) | |
); | |
m.impl("silu_", | |
TORCH_FN(InplaceOrView::silu_) | |
); | |
m.impl("silu.out", | |
TORCH_FN(InplaceOrView::silu_out_out) | |
); | |
m.impl("sin_", | |
TORCH_FN(InplaceOrView::sin_) | |
); | |
m.impl("sin.out", | |
TORCH_FN(InplaceOrView::sin_out_out) | |
); | |
m.impl("sinc_", | |
TORCH_FN(InplaceOrView::sinc_) | |
); | |
m.impl("sinc.out", | |
TORCH_FN(InplaceOrView::sinc_out_out) | |
); | |
m.impl("sinh_", | |
TORCH_FN(InplaceOrView::sinh_) | |
); | |
m.impl("sinh.out", | |
TORCH_FN(InplaceOrView::sinh_out_out) | |
); | |
m.impl("slice.Tensor", | |
TORCH_FN(InplaceOrView::slice_Tensor) | |
); | |
m.impl("slow_conv3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::slow_conv3d_backward_out_grad_input) | |
); | |
m.impl("slow_conv3d_forward.output", | |
TORCH_FN(InplaceOrView::slow_conv3d_forward_out_output) | |
); | |
m.impl("slow_conv_transpose2d_backward.grad_output", | |
TORCH_FN(InplaceOrView::slow_conv_transpose2d_backward_out_grad_output) | |
); | |
m.impl("slow_conv_transpose2d.out", | |
TORCH_FN(InplaceOrView::slow_conv_transpose2d_out_out) | |
); | |
m.impl("slow_conv_transpose3d_backward.grad_output", | |
TORCH_FN(InplaceOrView::slow_conv_transpose3d_backward_out_grad_output) | |
); | |
m.impl("slow_conv_transpose3d.out", | |
TORCH_FN(InplaceOrView::slow_conv_transpose3d_out_out) | |
); | |
m.impl("smooth_l1_loss_backward.grad_input", | |
TORCH_FN(InplaceOrView::smooth_l1_loss_backward_out_grad_input) | |
); | |
m.impl("smooth_l1_loss.out", | |
TORCH_FN(InplaceOrView::smooth_l1_loss_out_out) | |
); | |
m.impl("soft_margin_loss_backward.grad_input", | |
TORCH_FN(InplaceOrView::soft_margin_loss_backward_out_grad_input) | |
); | |
m.impl("soft_margin_loss.out", | |
TORCH_FN(InplaceOrView::soft_margin_loss_out_out) | |
); | |
m.impl("softplus_backward.grad_input", | |
TORCH_FN(InplaceOrView::softplus_backward_out_grad_input) | |
); | |
m.impl("softplus.out", | |
TORCH_FN(InplaceOrView::softplus_out_out) | |
); | |
m.impl("softshrink_backward.grad_input", | |
TORCH_FN(InplaceOrView::softshrink_backward_out_grad_input) | |
); | |
m.impl("softshrink.out", | |
TORCH_FN(InplaceOrView::softshrink_out_out) | |
); | |
m.impl("solve.solution", | |
TORCH_FN(InplaceOrView::solve_out_solution) | |
); | |
m.impl("sort.values", | |
TORCH_FN(InplaceOrView::sort_out_values) | |
); | |
m.impl("sort.values_stable", | |
TORCH_FN(InplaceOrView::sort_out_values_stable) | |
); | |
m.impl("sparse_resize_", | |
TORCH_FN(InplaceOrView::sparse_resize_) | |
); | |
m.impl("sparse_resize_and_clear_", | |
TORCH_FN(InplaceOrView::sparse_resize_and_clear_) | |
); | |
m.impl("special_gammaln.out", | |
TORCH_FN(InplaceOrView::special_gammaln_out_out) | |
); | |
m.impl("split.Tensor", | |
TORCH_FN(InplaceOrView::split_Tensor) | |
); | |
m.impl("split_with_sizes", | |
TORCH_FN(InplaceOrView::split_with_sizes) | |
); | |
m.impl("sqrt_", | |
TORCH_FN(InplaceOrView::sqrt_) | |
); | |
m.impl("sqrt.out", | |
TORCH_FN(InplaceOrView::sqrt_out_out) | |
); | |
m.impl("squeeze", | |
TORCH_FN(InplaceOrView::squeeze) | |
); | |
m.impl("squeeze.dim", | |
TORCH_FN(InplaceOrView::squeeze_dim) | |
); | |
m.impl("squeeze_", | |
TORCH_FN(InplaceOrView::squeeze_) | |
); | |
m.impl("squeeze_.dim", | |
TORCH_FN(InplaceOrView::squeeze__dim) | |
); | |
m.impl("sspaddmm.out", | |
TORCH_FN(InplaceOrView::sspaddmm_out_out) | |
); | |
m.impl("stack.out", | |
TORCH_FN(InplaceOrView::stack_out_out) | |
); | |
m.impl("std.out", | |
TORCH_FN(InplaceOrView::std_out_out) | |
); | |
m.impl("sub_.Tensor", | |
TORCH_FN(InplaceOrView::sub__Tensor) | |
); | |
m.impl("sub_.Scalar", | |
TORCH_FN(InplaceOrView::sub__Scalar) | |
); | |
m.impl("sub.out", | |
TORCH_FN(InplaceOrView::sub_out_out) | |
); | |
m.impl("sum.IntList_out", | |
TORCH_FN(InplaceOrView::sum_out_IntList_out) | |
); | |
m.impl("symeig.e", | |
TORCH_FN(InplaceOrView::symeig_out_e) | |
); | |
m.impl("t", | |
TORCH_FN(InplaceOrView::t) | |
); | |
m.impl("t_", | |
TORCH_FN(InplaceOrView::t_) | |
); | |
m.impl("take.out", | |
TORCH_FN(InplaceOrView::take_out_out) | |
); | |
m.impl("tan_", | |
TORCH_FN(InplaceOrView::tan_) | |
); | |
m.impl("tan.out", | |
TORCH_FN(InplaceOrView::tan_out_out) | |
); | |
m.impl("tanh_", | |
TORCH_FN(InplaceOrView::tanh_) | |
); | |
m.impl("tanh_backward.grad_input", | |
TORCH_FN(InplaceOrView::tanh_backward_out_grad_input) | |
); | |
m.impl("tanh.out", | |
TORCH_FN(InplaceOrView::tanh_out_out) | |
); | |
m.impl("tensordot.out", | |
TORCH_FN(InplaceOrView::tensordot_out_out) | |
); | |
m.impl("thnn_conv2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::thnn_conv2d_backward_out_grad_input) | |
); | |
m.impl("thnn_conv2d_forward.output", | |
TORCH_FN(InplaceOrView::thnn_conv2d_forward_out_output) | |
); | |
m.impl("thnn_conv_depthwise2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::thnn_conv_depthwise2d_backward_out_grad_input) | |
); | |
m.impl("thnn_conv_depthwise2d_forward.out", | |
TORCH_FN(InplaceOrView::thnn_conv_depthwise2d_forward_out_out) | |
); | |
m.impl("threshold_", | |
TORCH_FN(InplaceOrView::threshold_) | |
); | |
m.impl("threshold.out", | |
TORCH_FN(InplaceOrView::threshold_out_out) | |
); | |
m.impl("topk.values", | |
TORCH_FN(InplaceOrView::topk_out_values) | |
); | |
m.impl("transpose.int", | |
TORCH_FN(InplaceOrView::transpose_int) | |
); | |
m.impl("transpose_", | |
TORCH_FN(InplaceOrView::transpose_) | |
); | |
m.impl("triangular_solve.X", | |
TORCH_FN(InplaceOrView::triangular_solve_out_X) | |
); | |
m.impl("tril_", | |
TORCH_FN(InplaceOrView::tril_) | |
); | |
m.impl("tril.out", | |
TORCH_FN(InplaceOrView::tril_out_out) | |
); | |
m.impl("triu_", | |
TORCH_FN(InplaceOrView::triu_) | |
); | |
m.impl("triu.out", | |
TORCH_FN(InplaceOrView::triu_out_out) | |
); | |
m.impl("trunc_", | |
TORCH_FN(InplaceOrView::trunc_) | |
); | |
m.impl("trunc.out", | |
TORCH_FN(InplaceOrView::trunc_out_out) | |
); | |
m.impl("unbind.int", | |
TORCH_FN(InplaceOrView::unbind_int) | |
); | |
m.impl("unfold", | |
TORCH_FN(InplaceOrView::unfold) | |
); | |
m.impl("uniform_", | |
TORCH_FN(InplaceOrView::uniform_) | |
); | |
m.impl("unsqueeze", | |
TORCH_FN(InplaceOrView::unsqueeze) | |
); | |
m.impl("unsqueeze_", | |
TORCH_FN(InplaceOrView::unsqueeze_) | |
); | |
m.impl("upsample_bicubic2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::upsample_bicubic2d_backward_out_grad_input) | |
); | |
m.impl("upsample_bicubic2d.out", | |
TORCH_FN(InplaceOrView::upsample_bicubic2d_out_out) | |
); | |
m.impl("upsample_bilinear2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::upsample_bilinear2d_backward_out_grad_input) | |
); | |
m.impl("upsample_bilinear2d.out", | |
TORCH_FN(InplaceOrView::upsample_bilinear2d_out_out) | |
); | |
m.impl("upsample_linear1d_backward.grad_input", | |
TORCH_FN(InplaceOrView::upsample_linear1d_backward_out_grad_input) | |
); | |
m.impl("upsample_linear1d.out", | |
TORCH_FN(InplaceOrView::upsample_linear1d_out_out) | |
); | |
m.impl("upsample_nearest1d_backward.grad_input", | |
TORCH_FN(InplaceOrView::upsample_nearest1d_backward_out_grad_input) | |
); | |
m.impl("upsample_nearest1d.out", | |
TORCH_FN(InplaceOrView::upsample_nearest1d_out_out) | |
); | |
m.impl("upsample_nearest2d_backward.grad_input", | |
TORCH_FN(InplaceOrView::upsample_nearest2d_backward_out_grad_input) | |
); | |
m.impl("upsample_nearest2d.out", | |
TORCH_FN(InplaceOrView::upsample_nearest2d_out_out) | |
); | |
m.impl("upsample_nearest3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::upsample_nearest3d_backward_out_grad_input) | |
); | |
m.impl("upsample_nearest3d.out", | |
TORCH_FN(InplaceOrView::upsample_nearest3d_out_out) | |
); | |
m.impl("upsample_trilinear3d_backward.grad_input", | |
TORCH_FN(InplaceOrView::upsample_trilinear3d_backward_out_grad_input) | |
); | |
m.impl("upsample_trilinear3d.out", | |
TORCH_FN(InplaceOrView::upsample_trilinear3d_out_out) | |
); | |
m.impl("values", | |
TORCH_FN(InplaceOrView::values) | |
); | |
m.impl("var.out", | |
TORCH_FN(InplaceOrView::var_out_out) | |
); | |
m.impl("vdot.out", | |
TORCH_FN(InplaceOrView::vdot_out_out) | |
); | |
m.impl("view", | |
TORCH_FN(InplaceOrView::view) | |
); | |
m.impl("view.dtype", | |
TORCH_FN(InplaceOrView::view_dtype) | |
); | |
m.impl("view_as_complex", | |
TORCH_FN(InplaceOrView::view_as_complex) | |
); | |
m.impl("view_as_real", | |
TORCH_FN(InplaceOrView::view_as_real) | |
); | |
m.impl("xlogy_.Tensor", | |
TORCH_FN(InplaceOrView::xlogy__Tensor) | |
); | |
m.impl("xlogy_.Scalar_Other", | |
TORCH_FN(InplaceOrView::xlogy__Scalar_Other) | |
); | |
m.impl("xlogy.OutTensor", | |
TORCH_FN(InplaceOrView::xlogy_out_OutTensor) | |
); | |
m.impl("xlogy.OutScalar_Self", | |
TORCH_FN(InplaceOrView::xlogy_out_OutScalar_Self) | |
); | |
m.impl("xlogy.OutScalar_Other", | |
TORCH_FN(InplaceOrView::xlogy_out_OutScalar_Other) | |
); | |
m.impl("zero_", | |
TORCH_FN(InplaceOrView::zero_) | |
); | |
} | |
} // namespace | |
} // namespace torch |