@ailzhang
Created July 31, 2020 20:20
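Before/after diff of the generated `VariableTypeEverything.cpp` for what appears to be PyTorch PR #42031 (per the `before42031/` and `after42031/` paths). Every function deleted in this diff is a pure passthrough wrapper that only forwards to the matching `TypeDefault::` kernel and returns the result; wrappers that do real autograd bookkeeping (`unpack`, `grad_fn` setup, `increment_version`, the `NDEBUG` checks) are untouched. The toy sketch below is an assumption about the mechanism, not PyTorch source: it illustrates why such passthroughs can simply stop being emitted once the dispatcher falls through to the default kernel when no autograd-specific kernel is registered.

```cpp
// Minimal, self-contained sketch (hypothetical names, not the PyTorch
// dispatcher) of the pattern this diff removes.
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

using Kernel = std::function<int(int)>;

// Two registration tables, mirroring the split seen in the diff:
// VariableType (autograd wrappers) and TypeDefault (plain kernels).
struct ToyDispatcher {
  std::unordered_map<std::string, Kernel> autograd_kernels;
  std::unordered_map<std::string, Kernel> default_kernels;

  int call(const std::string& op, int x) const {
    // Prefer an autograd kernel when one is registered; otherwise
    // "fall through" to the default kernel directly.
    auto it = autograd_kernels.find(op);
    if (it != autograd_kernels.end()) return it->second(x);
    return default_kernels.at(op)(x);
  }
};

int main() {
  ToyDispatcher d;
  d.default_kernels["__and__.Scalar"] = [](int x) { return x & 0xF; };

  // Before: a generated passthrough was registered that only forwarded to
  // the default kernel, like the deleted __and___Scalar wrapper below.
  d.autograd_kernels["__and__.Scalar"] =
      [&d](int x) { return d.default_kernels.at("__and__.Scalar")(x); };
  std::cout << d.call("__and__.Scalar", 0x123) << "\n";  // prints 3

  // After: the passthrough is simply not emitted; behavior is identical
  // because lookup falls through to TypeDefault.
  d.autograd_kernels.erase("__and__.Scalar");
  std::cout << d.call("__and__.Scalar", 0x123) << "\n";  // still prints 3
}
```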
--- before42031/VariableTypeEverything.cpp 2020-07-31 13:16:02.450902000 -0700
+++ after42031/VariableTypeEverything.cpp 2020-07-31 13:06:18.799446000 -0700
@@ -46,22 +46,6 @@
// Later when we merge the mobile op registration the anonymous namespace
// will be restored.
// namespace {
-Tensor __and___Scalar(const Tensor & self, Scalar other) {
- auto result = TypeDefault::__and___Scalar(self, other);
- return result;
-}
-Tensor __and___Tensor(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::__and___Tensor(self, other);
- return result;
-}
-Tensor & __iand___Scalar(Tensor & self, Scalar other) {
- TypeDefault::__iand___Scalar(self, other);
- return self;
-}
-Tensor & __iand___Tensor(Tensor & self, const Tensor & other) {
- TypeDefault::__iand___Tensor(self, other);
- return self;
-}
Tensor & __ilshift___Scalar(Tensor & self, Scalar other) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -110,14 +94,6 @@
increment_version(self);
return self;
}
-Tensor & __ior___Scalar(Tensor & self, Scalar other) {
- TypeDefault::__ior___Scalar(self, other);
- return self;
-}
-Tensor & __ior___Tensor(Tensor & self, const Tensor & other) {
- TypeDefault::__ior___Tensor(self, other);
- return self;
-}
Tensor & __irshift___Scalar(Tensor & self, Scalar other) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -166,14 +142,6 @@
increment_version(self);
return self;
}
-Tensor & __ixor___Scalar(Tensor & self, Scalar other) {
- TypeDefault::__ixor___Scalar(self, other);
- return self;
-}
-Tensor & __ixor___Tensor(Tensor & self, const Tensor & other) {
- TypeDefault::__ixor___Tensor(self, other);
- return self;
-}
Tensor __lshift___Scalar(const Tensor & self, Scalar other) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -222,14 +190,6 @@
#endif
return result;
}
-Tensor __or___Scalar(const Tensor & self, Scalar other) {
- auto result = TypeDefault::__or___Scalar(self, other);
- return result;
-}
-Tensor __or___Tensor(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::__or___Tensor(self, other);
- return result;
-}
Tensor __rshift___Scalar(const Tensor & self, Scalar other) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -278,14 +238,6 @@
#endif
return result;
}
-Tensor __xor___Scalar(const Tensor & self, Scalar other) {
- auto result = TypeDefault::__xor___Scalar(self, other);
- return result;
-}
-Tensor __xor___Tensor(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::__xor___Tensor(self, other);
- return result;
-}
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AdaptiveAvgPool2DBackward> grad_fn;
@@ -353,10 +305,6 @@
}
return result;
}
-Tensor _add_batch_dim(const Tensor & self, int64_t batch_dim, int64_t level) {
- auto result = TypeDefault::_add_batch_dim(self, batch_dim, level);
- return result;
-}
Tensor & _addmv_impl_(Tensor & self, const Tensor & self2, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) {
auto& self_ = unpack(self, "self", 0);
auto& self2_ = unpack(self2, "self2", 1);
@@ -631,26 +579,6 @@
}
return result;
}
-Tensor & _baddbmm_mkl_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
- TypeDefault::_baddbmm_mkl_(self, batch1, batch2, beta, alpha);
- return self;
-}
-std::tuple<Tensor,Tensor,Tensor,Tensor,int64_t> _batch_norm_impl_index(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- Tensor result3;
- int64_t result4;
- std::tie(result0, result1, result2, result3, result4) = TypeDefault::_batch_norm_impl_index(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2), std::move(result3), std::move(result4));
-}
-std::tuple<Tensor,Tensor,Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var_transform, bool train, double eps, std::array<bool,3> output_mask, const Tensor & reservedSpace) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- std::tie(result0, result1, result2) = TypeDefault::_batch_norm_impl_index_backward(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
-}
Tensor _bmm(const Tensor & self, const Tensor & mat2, bool deterministic) {
auto& self_ = unpack(self, "self", 0);
auto& mat2_ = unpack(mat2, "mat2", 1);
@@ -740,38 +668,6 @@
}
return out;
}
-Tensor _cast_Byte(const Tensor & self, bool non_blocking) {
- auto result = TypeDefault::_cast_Byte(self, non_blocking);
- return result;
-}
-Tensor _cast_Char(const Tensor & self, bool non_blocking) {
- auto result = TypeDefault::_cast_Char(self, non_blocking);
- return result;
-}
-Tensor _cast_Double(const Tensor & self, bool non_blocking) {
- auto result = TypeDefault::_cast_Double(self, non_blocking);
- return result;
-}
-Tensor _cast_Float(const Tensor & self, bool non_blocking) {
- auto result = TypeDefault::_cast_Float(self, non_blocking);
- return result;
-}
-Tensor _cast_Half(const Tensor & self, bool non_blocking) {
- auto result = TypeDefault::_cast_Half(self, non_blocking);
- return result;
-}
-Tensor _cast_Int(const Tensor & self, bool non_blocking) {
- auto result = TypeDefault::_cast_Int(self, non_blocking);
- return result;
-}
-Tensor _cast_Long(const Tensor & self, bool non_blocking) {
- auto result = TypeDefault::_cast_Long(self, non_blocking);
- return result;
-}
-Tensor _cast_Short(const Tensor & self, bool non_blocking) {
- auto result = TypeDefault::_cast_Short(self, non_blocking);
- return result;
-}
Tensor _cat(TensorList tensors, int64_t dim) {
auto tensors_ = unpack(tensors, "tensors", 0);
std::shared_ptr<NotImplemented> grad_fn;
@@ -1012,12 +908,6 @@
}
return result;
}
-std::tuple<double,int64_t> _choose_qparams_per_tensor(const Tensor & self, bool reduce_range) {
- double result0;
- int64_t result1;
- std::tie(result0, result1) = TypeDefault::_choose_qparams_per_tensor(self, reduce_range);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
Tensor & _coalesced_(Tensor & self, bool coalesced) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -1038,21 +928,6 @@
increment_version(self);
return self;
}
-Tensor _convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
- auto result = TypeDefault::_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
- return result;
-}
-std::tuple<Tensor,Tensor,Tensor> _convolution_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, std::array<bool,3> output_mask) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- std::tie(result0, result1, result2) = TypeDefault::_convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, output_mask);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
-}
-Tensor _convolution_nogroup(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding) {
- auto result = TypeDefault::_convolution_nogroup(input, weight, bias, stride, padding, dilation, transposed, output_padding);
- return result;
-}
Tensor _copy_from(const Tensor & self, const Tensor & dst, bool non_blocking) {
auto& self_ = unpack(self, "self", 0);
auto& dst_ = unpack(dst, "dst", 1);
@@ -1490,20 +1365,6 @@
}
return result;
}
-void _cufft_clear_plan_cache(int64_t device_index) {
- TypeDefault::_cufft_clear_plan_cache(device_index);
-}
-int64_t _cufft_get_plan_cache_max_size(int64_t device_index) {
- auto result = TypeDefault::_cufft_get_plan_cache_max_size(device_index);
- return result;
-}
-int64_t _cufft_get_plan_cache_size(int64_t device_index) {
- auto result = TypeDefault::_cufft_get_plan_cache_size(device_index);
- return result;
-}
-void _cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size) {
- TypeDefault::_cufft_set_plan_cache_max_size(device_index, max_size);
-}
void _cummax_helper(const Tensor & self, Tensor & values, Tensor & indices, int64_t dim) {
auto& self_ = unpack(self, "self", 0);
auto& values_ = unpack(values, "values", 1);
@@ -1704,10 +1565,6 @@
}
return out;
}
-int64_t _debug_has_internal_overlap(const Tensor & self) {
- auto result = TypeDefault::_debug_has_internal_overlap(self);
- return result;
-}
int64_t _dimI(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -1748,10 +1605,6 @@
#endif
return result;
}
-Tensor _dim_arange(const Tensor & like, int64_t dim) {
- auto result = TypeDefault::_dim_arange(like, dim);
- return result;
-}
Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) {
auto& x_ = unpack(x, "x", 0);
auto& alpha_ = unpack(alpha, "alpha", 1);
@@ -1869,10 +1722,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2), std::move(result3));
}
-Tensor _embedding_bag_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights) {
- auto result = TypeDefault::_embedding_bag_backward(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights);
- return result;
-}
Tensor _embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights) {
auto& grad_ = unpack(grad, "grad", 0);
auto& indices_ = unpack(indices, "indices", 1);
@@ -2065,10 +1914,6 @@
}
return result;
}
-Tensor _embedding_bag_sparse_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights) {
- auto result = TypeDefault::_embedding_bag_sparse_backward(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights);
- return result;
-}
Tensor _empty_affine_quantized(IntArrayRef size, const TensorOptions & options, double scale, int64_t zero_point, c10::optional<MemoryFormat> memory_format) {
auto options_ = TensorOptions(options);
auto tmp = ([&]() {
@@ -2206,13 +2051,6 @@
}
return result;
}
-std::tuple<Tensor,Tensor,Tensor> _fake_quantize_learnable_per_channel_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- std::tie(result0, result1, result2) = TypeDefault::_fake_quantize_learnable_per_channel_affine_backward(grad, self, scale, zero_point, axis, quant_min, quant_max);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
-}
Tensor _fake_quantize_learnable_per_tensor_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
auto& self_ = unpack(self, "self", 0);
auto& scale_ = unpack(scale, "scale", 1);
@@ -2262,13 +2100,6 @@
}
return result;
}
-std::tuple<Tensor,Tensor,Tensor> _fake_quantize_learnable_per_tensor_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- std::tie(result0, result1, result2) = TypeDefault::_fake_quantize_learnable_per_tensor_affine_backward(grad, self, scale, zero_point, quant_min, quant_max);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
-}
Tensor _fft_with_size(const Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntArrayRef checked_signal_sizes, bool normalized, bool onesided, IntArrayRef output_sizes) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<FftWithSizeBackward> grad_fn;
@@ -2340,14 +2171,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1));
}
-Tensor _gather_sparse_backward(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & grad) {
- auto result = TypeDefault::_gather_sparse_backward(self, dim, index, grad);
- return result;
-}
-bool _has_compatible_shallow_copy_type(const Tensor & self, const Tensor & from) {
- auto result = TypeDefault::_has_compatible_shallow_copy_type(self, from);
- return result;
-}
Tensor & _index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack(index, "index", 2);
@@ -3106,10 +2929,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1));
}
-bool _nnpack_available() {
- auto result = TypeDefault::_nnpack_available();
- return result;
-}
Tensor _nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride) {
auto& input_ = unpack(input, "input", 0);
auto& weight_ = unpack(weight, "weight", 1);
@@ -3160,21 +2979,6 @@
}
return result;
}
-std::tuple<Tensor,Tensor,Tensor> _nnpack_spatial_convolution_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, std::array<bool,3> output_mask) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- std::tie(result0, result1, result2) = TypeDefault::_nnpack_spatial_convolution_backward(input, grad_output, weight, padding, output_mask);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
-}
-Tensor _nnpack_spatial_convolution_backward_input(const Tensor & input, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding) {
- auto result = TypeDefault::_nnpack_spatial_convolution_backward_input(input, grad_output, weight, padding);
- return result;
-}
-Tensor _nnpack_spatial_convolution_backward_weight(const Tensor & input, IntArrayRef weightsize, const Tensor & grad_output, IntArrayRef padding) {
- auto result = TypeDefault::_nnpack_spatial_convolution_backward_weight(input, weightsize, grad_output, padding);
- return result;
-}
int64_t _nnz(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -3239,16 +3043,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1));
}
-Tensor _pack_padded_sequence_backward(const Tensor & grad, IntArrayRef input_size, const Tensor & batch_sizes, bool batch_first) {
- auto result = TypeDefault::_pack_padded_sequence_backward(grad, input_size, batch_sizes, batch_first);
- return result;
-}
-std::tuple<Tensor,Tensor> _pad_packed_sequence(const Tensor & data, const Tensor & batch_sizes, bool batch_first, Scalar padding_value, int64_t total_length) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::_pad_packed_sequence(data, batch_sizes, batch_first, padding_value, total_length);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
Tensor _pdist_backward(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist) {
auto& grad_ = unpack(grad, "grad", 0);
auto& self_ = unpack(self, "self", 1);
@@ -3356,14 +3150,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1));
}
-Tensor _remove_batch_dim(const Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
- auto result = TypeDefault::_remove_batch_dim(self, level, batch_size, out_dim);
- return result;
-}
-Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape) {
- auto result = TypeDefault::_reshape_from_tensor(self, shape);
- return result;
-}
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) {
auto& condition_ = unpack(condition, "condition", 0);
auto& self_ = unpack(self, "self", 1);
@@ -3437,28 +3223,6 @@
}
return result;
}
-Tensor _shape_as_tensor(const Tensor & self) {
- auto result = TypeDefault::_shape_as_tensor(self);
- return result;
-}
-std::tuple<Tensor,Tensor> _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<ScalarType> dtype) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::_sobol_engine_draw(quasi, n, sobolstate, dimension, num_generated, dtype);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
-Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
- TypeDefault::_sobol_engine_ff_(self, n, sobolstate, dimension, num_generated);
- return self;
-}
-Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension) {
- TypeDefault::_sobol_engine_initialize_state_(self, dimension);
- return self;
-}
-Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension) {
- TypeDefault::_sobol_engine_scramble_(self, ltm, dimension);
- return self;
-}
Tensor _softmax(const Tensor & self, int64_t dim, bool half_to_float) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SoftmaxBackward> grad_fn;
@@ -3627,10 +3391,6 @@
}
return result;
}
-Tensor _sparse_coo_tensor_unsafe(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::_sparse_coo_tensor_unsafe(indices, values, size, options);
- return result;
-}
Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const TensorOptions & options) {
auto options_ = TensorOptions(options);
auto tmp = ([&]() {
@@ -3680,14 +3440,6 @@
}
return result;
}
-Tensor _sparse_log_softmax_int(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::_sparse_log_softmax_int(self, dim, dtype);
- return result;
-}
-Tensor _sparse_log_softmax_Dimname(const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::_sparse_log_softmax_Dimname(self, dim, dtype);
- return result;
-}
Tensor _sparse_log_softmax(const Tensor & self, int64_t dim, bool half_to_float) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SparseLogSoftmaxBackward> grad_fn;
@@ -3765,18 +3517,6 @@
}
return result;
}
-Tensor _sparse_mm(const Tensor & sparse, const Tensor & dense) {
- auto result = TypeDefault::_sparse_mm(sparse, dense);
- return result;
-}
-Tensor _sparse_softmax_int(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::_sparse_softmax_int(self, dim, dtype);
- return result;
-}
-Tensor _sparse_softmax_Dimname(const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::_sparse_softmax_Dimname(self, dim, dtype);
- return result;
-}
Tensor _sparse_softmax(const Tensor & self, int64_t dim, bool half_to_float) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SparseSoftmaxBackward> grad_fn;
@@ -3854,14 +3594,6 @@
}
return result;
}
-Tensor _sparse_sum(const Tensor & self) {
- auto result = TypeDefault::_sparse_sum(self);
- return result;
-}
-Tensor _sparse_sum_dtype(const Tensor & self, ScalarType dtype) {
- auto result = TypeDefault::_sparse_sum_dtype(self, dtype);
- return result;
-}
Tensor _sparse_sum_dim(const Tensor & self, IntArrayRef dim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SparseSumBackward> grad_fn;
@@ -3892,10 +3624,6 @@
}
return result;
}
-Tensor _sparse_sum_dim_dtype(const Tensor & self, IntArrayRef dim, ScalarType dtype) {
- auto result = TypeDefault::_sparse_sum_dim_dtype(self, dim, dtype);
- return result;
-}
Tensor _sparse_sum_backward(const Tensor & grad, const Tensor & self, IntArrayRef dim) {
auto& grad_ = unpack(grad, "grad", 0);
auto& self_ = unpack(self, "self", 1);
@@ -4145,28 +3873,6 @@
}
return result;
}
-Tensor _test_serialization_subcmul(const Tensor & self, const Tensor & other, Scalar alpha) {
- auto result = TypeDefault::_test_serialization_subcmul(self, other, alpha);
- return result;
-}
-std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_differentiable_gru_cell_backward(const Tensor & grad_hy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const Tensor & input_bias, const Tensor & hidden_bias) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- Tensor result3;
- Tensor result4;
- std::tie(result0, result1, result2, result3, result4) = TypeDefault::_thnn_differentiable_gru_cell_backward(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2), std::move(result3), std::move(result4));
-}
-std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_differentiable_lstm_cell_backward(const Tensor & grad_hy, const Tensor & grad_cy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & input_bias, const Tensor & hidden_bias, const Tensor & cx, const Tensor & cy) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- Tensor result3;
- Tensor result4;
- std::tie(result0, result1, result2, result3, result4) = TypeDefault::_thnn_differentiable_lstm_cell_backward(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2), std::move(result3), std::move(result4));
-}
std::tuple<Tensor,Tensor> _thnn_fused_gru_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const Tensor & input_bias, const Tensor & hidden_bias) {
auto& input_gates_ = unpack(input_gates, "input_gates", 0);
auto& hidden_gates_ = unpack(hidden_gates, "hidden_gates", 1);
@@ -4623,13 +4329,6 @@
#endif
return result;
}
-bool _use_cudnn_rnn_flatten_weight() {
- auto result = TypeDefault::_use_cudnn_rnn_flatten_weight();
- return result;
-}
-void _validate_sparse_coo_tensor_args(const Tensor & indices, const Tensor & values, IntArrayRef size) {
- TypeDefault::_validate_sparse_coo_tensor_args(indices, values, size);
-}
Tensor _values(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -4678,10 +4377,6 @@
}
return result;
}
-Tensor _weight_norm(const Tensor & v, const Tensor & g, int64_t dim) {
- auto result = TypeDefault::_weight_norm(v, g, dim);
- return result;
-}
std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) {
auto& v_ = unpack(v, "v", 0);
auto& g_ = unpack(g, "g", 1);
@@ -4780,12 +4475,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1));
}
-std::tuple<Tensor,Tensor> _weight_norm_differentiable_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::_weight_norm_differentiable_backward(grad_w, saved_v, saved_g, saved_norms, dim);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
Tensor abs(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AbsBackward> grad_fn;
@@ -5173,14 +4862,6 @@
}
return out;
}
-Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size) {
- auto result = TypeDefault::adaptive_avg_pool1d(self, output_size);
- return result;
-}
-Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) {
- auto result = TypeDefault::adaptive_avg_pool2d(self, output_size);
- return result;
-}
Tensor & adaptive_avg_pool2d_out_out(Tensor & out, const Tensor & self, IntArrayRef output_size) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -5370,12 +5051,6 @@
}
return out;
}
-std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::adaptive_max_pool1d(self, output_size);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
std::tuple<Tensor,Tensor> adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AdaptiveMaxPool2DBackward> grad_fn;
@@ -7013,10 +6688,6 @@
}
return result;
}
-Tensor affine_grid_generator_backward(const Tensor & grad, IntArrayRef size, bool align_corners) {
- auto result = TypeDefault::affine_grid_generator_backward(grad, size, align_corners);
- return result;
-}
Tensor alias(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AliasBackward> grad_fn;
@@ -7051,22 +6722,6 @@
}
return result;
}
-Tensor align_as(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::align_as(self, other);
- return result;
-}
-std::vector<Tensor> align_tensors(TensorList tensors) {
- auto result = TypeDefault::align_tensors(tensors);
- return result;
-}
-Tensor align_to(const Tensor & self, DimnameList names) {
- auto result = TypeDefault::align_to(self, names);
- return result;
-}
-Tensor align_to_ellipsis_idx(const Tensor & self, DimnameList order, int64_t ellipsis_idx) {
- auto result = TypeDefault::align_to_ellipsis_idx(self, order, ellipsis_idx);
- return result;
-}
Tensor all_dim(const Tensor & self, int64_t dim, bool keepdim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AllBackward1> grad_fn;
@@ -7095,10 +6750,6 @@
}
return result;
}
-Tensor all_dimname(const Tensor & self, Dimname dim, bool keepdim) {
- auto result = TypeDefault::all_dimname(self, dim, keepdim);
- return result;
-}
Tensor all(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AllBackward0> grad_fn;
@@ -7165,22 +6816,6 @@
}
return out;
}
-Tensor & all_out_dimname_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim) {
- TypeDefault::all_out_dimname_out(out, self, dim, keepdim);
- return out;
-}
-bool allclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan) {
- auto result = TypeDefault::allclose(self, other, rtol, atol, equal_nan);
- return result;
-}
-Tensor alpha_dropout(const Tensor & input, double p, bool train) {
- auto result = TypeDefault::alpha_dropout(input, p, train);
- return result;
-}
-Tensor & alpha_dropout_(Tensor & self, double p, bool train) {
- TypeDefault::alpha_dropout_(self, p, train);
- return self;
-}
Tensor angle(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AngleBackward> grad_fn;
@@ -7277,10 +6912,6 @@
}
return result;
}
-Tensor any_dimname(const Tensor & self, Dimname dim, bool keepdim) {
- auto result = TypeDefault::any_dimname(self, dim, keepdim);
- return result;
-}
Tensor any(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AnyBackward0> grad_fn;
@@ -7347,26 +6978,6 @@
}
return out;
}
-Tensor & any_out_dimname_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim) {
- TypeDefault::any_out_dimname_out(out, self, dim, keepdim);
- return out;
-}
-Tensor arange(Scalar end, const TensorOptions & options) {
- auto result = TypeDefault::arange(end, options);
- return result;
-}
-Tensor arange_start(Scalar start, Scalar end, const TensorOptions & options) {
- auto result = TypeDefault::arange_start(start, end, options);
- return result;
-}
-Tensor arange_start_step(Scalar start, Scalar end, Scalar step, const TensorOptions & options) {
- auto result = TypeDefault::arange_start_step(start, end, step, options);
- return result;
-}
-Tensor & arange_out_out(Tensor & out, Scalar end) {
- TypeDefault::arange_out_out(out, end);
- return out;
-}
Tensor & arange_out_start_out(Tensor & out, Scalar start, Scalar end, Scalar step) {
auto& out_ = unpack(out, "out", 0);
#ifndef NDEBUG
@@ -7427,14 +7038,6 @@
#endif
return result;
}
-Tensor argsort(const Tensor & self, int64_t dim, bool descending) {
- auto result = TypeDefault::argsort(self, dim, descending);
- return result;
-}
-Tensor argsort_dimname(const Tensor & self, Dimname dim, bool descending) {
- auto result = TypeDefault::argsort_dimname(self, dim, descending);
- return result;
-}
Tensor as_strided(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AsStridedBackward> grad_fn;
@@ -8018,34 +7621,6 @@
}
return out;
}
-Tensor atleast_1d(const Tensor & self) {
- auto result = TypeDefault::atleast_1d(self);
- return result;
-}
-std::vector<Tensor> atleast_1d_Sequence(TensorList tensors) {
- auto result = TypeDefault::atleast_1d_Sequence(tensors);
- return result;
-}
-Tensor atleast_2d(const Tensor & self) {
- auto result = TypeDefault::atleast_2d(self);
- return result;
-}
-std::vector<Tensor> atleast_2d_Sequence(TensorList tensors) {
- auto result = TypeDefault::atleast_2d_Sequence(tensors);
- return result;
-}
-Tensor atleast_3d(const Tensor & self) {
- auto result = TypeDefault::atleast_3d(self);
- return result;
-}
-std::vector<Tensor> atleast_3d_Sequence(TensorList tensors) {
- auto result = TypeDefault::atleast_3d_Sequence(tensors);
- return result;
-}
-Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
- auto result = TypeDefault::avg_pool1d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
- return result;
-}
Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<AvgPool2DBackward> grad_fn;
@@ -8529,18 +8104,6 @@
}
return out;
}
-Tensor bartlett_window(int64_t window_length, const TensorOptions & options) {
- auto result = TypeDefault::bartlett_window(window_length, options);
- return result;
-}
-Tensor bartlett_window_periodic(int64_t window_length, bool periodic, const TensorOptions & options) {
- auto result = TypeDefault::bartlett_window_periodic(window_length, periodic, options);
- return result;
-}
-Tensor batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
- auto result = TypeDefault::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
- return result;
-}
Tensor batch_norm_backward_elemt(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & weight, const Tensor & mean_dy, const Tensor & mean_dy_xmu) {
auto& grad_out_ = unpack(grad_out, "grad_out", 0);
auto& input_ = unpack(input, "input", 1);
@@ -9047,10 +8610,6 @@
}
return result;
}
-Tensor bernoulli_p(const Tensor & self, double p, c10::optional<Generator> generator) {
- auto result = TypeDefault::bernoulli_p(self, p, generator);
- return result;
-}
Tensor & bernoulli__Tensor(Tensor & self, const Tensor & p, c10::optional<Generator> generator) {
auto& self_ = unpack(self, "self", 0);
auto& p_ = unpack(p, "p", 1);
@@ -9156,10 +8715,6 @@
}
return out;
}
-Tensor bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const Tensor & bias) {
- auto result = TypeDefault::bilinear(input1, input2, weight, bias);
- return result;
-}
Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) {
auto& self_ = unpack(self, "self", 0);
auto& target_ = unpack(target, "target", 1);
@@ -9444,10 +8999,6 @@
}
return result;
}
-Tensor binary_cross_entropy_with_logits_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction) {
- auto result = TypeDefault::binary_cross_entropy_with_logits_backward(grad_output, self, target, weight, pos_weight, reduction);
- return result;
-}
Tensor bincount(const Tensor & self, const Tensor & weights, int64_t minlength) {
auto& self_ = unpack(self, "self", 0);
auto weights_ = unpack_opt(weights, "weights", 1);
@@ -9520,22 +9071,6 @@
}
return result;
}
-Tensor bitwise_and_Scalar(const Tensor & self, Scalar other) {
- auto result = TypeDefault::bitwise_and_Scalar(self, other);
- return result;
-}
-Tensor bitwise_and_Tensor(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::bitwise_and_Tensor(self, other);
- return result;
-}
-Tensor & bitwise_and__Scalar(Tensor & self, Scalar other) {
- TypeDefault::bitwise_and__Scalar(self, other);
- return self;
-}
-Tensor & bitwise_and__Tensor(Tensor & self, const Tensor & other) {
- TypeDefault::bitwise_and__Tensor(self, other);
- return self;
-}
Tensor & bitwise_and_out_Tensor_out(Tensor & out, const Tensor & self, const Tensor & other) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -9620,14 +9155,6 @@
}
return out;
}
-Tensor bitwise_not(const Tensor & self) {
- auto result = TypeDefault::bitwise_not(self);
- return result;
-}
-Tensor & bitwise_not_(Tensor & self) {
- TypeDefault::bitwise_not_(self);
- return self;
-}
Tensor & bitwise_not_out_out(Tensor & out, const Tensor & self) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -9666,22 +9193,6 @@
}
return out;
}
-Tensor bitwise_or_Scalar(const Tensor & self, Scalar other) {
- auto result = TypeDefault::bitwise_or_Scalar(self, other);
- return result;
-}
-Tensor bitwise_or_Tensor(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::bitwise_or_Tensor(self, other);
- return result;
-}
-Tensor & bitwise_or__Scalar(Tensor & self, Scalar other) {
- TypeDefault::bitwise_or__Scalar(self, other);
- return self;
-}
-Tensor & bitwise_or__Tensor(Tensor & self, const Tensor & other) {
- TypeDefault::bitwise_or__Tensor(self, other);
- return self;
-}
Tensor & bitwise_or_out_Tensor_out(Tensor & out, const Tensor & self, const Tensor & other) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -9766,22 +9277,6 @@
}
return out;
}
-Tensor bitwise_xor_Scalar(const Tensor & self, Scalar other) {
- auto result = TypeDefault::bitwise_xor_Scalar(self, other);
- return result;
-}
-Tensor bitwise_xor_Tensor(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::bitwise_xor_Tensor(self, other);
- return result;
-}
-Tensor & bitwise_xor__Scalar(Tensor & self, Scalar other) {
- TypeDefault::bitwise_xor__Scalar(self, other);
- return self;
-}
-Tensor & bitwise_xor__Tensor(Tensor & self, const Tensor & other) {
- TypeDefault::bitwise_xor__Tensor(self, other);
- return self;
-}
Tensor & bitwise_xor_out_Tensor_out(Tensor & out, const Tensor & self, const Tensor & other) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -9866,18 +9361,6 @@
}
return out;
}
-Tensor blackman_window(int64_t window_length, const TensorOptions & options) {
- auto result = TypeDefault::blackman_window(window_length, options);
- return result;
-}
-Tensor blackman_window_periodic(int64_t window_length, bool periodic, const TensorOptions & options) {
- auto result = TypeDefault::blackman_window_periodic(window_length, periodic, options);
- return result;
-}
-Tensor block_diag(TensorList tensors) {
- auto result = TypeDefault::block_diag(tensors);
- return result;
-}
Tensor bmm(const Tensor & self, const Tensor & mat2) {
auto& self_ = unpack(self, "self", 0);
auto& mat2_ = unpack(mat2, "mat2", 1);
@@ -9966,10 +9449,6 @@
}
return out;
}
-std::vector<Tensor> broadcast_tensors(TensorList tensors) {
- auto result = TypeDefault::broadcast_tensors(tensors);
- return result;
-}
Tensor bucketize_Tensor(const Tensor & self, const Tensor & boundaries, bool out_int32, bool right) {
auto& self_ = unpack(self, "self", 0);
auto& boundaries_ = unpack(boundaries, "boundaries", 1);
@@ -10080,14 +9559,6 @@
}
return out;
}
-bool can_cast(ScalarType from, ScalarType to) {
- auto result = TypeDefault::can_cast(from, to);
- return result;
-}
-Tensor cartesian_prod(TensorList tensors) {
- auto result = TypeDefault::cartesian_prod(tensors);
- return result;
-}
Tensor cat(TensorList tensors, int64_t dim) {
auto tensors_ = unpack(tensors, "tensors", 0);
std::shared_ptr<CatBackward> grad_fn;
@@ -10127,10 +9598,6 @@
}
return result;
}
-Tensor cat_names(TensorList tensors, Dimname dim) {
- auto result = TypeDefault::cat_names(tensors, dim);
- return result;
-}
Tensor & cat_out_out(Tensor & out, TensorList tensors, int64_t dim) {
auto& out_ = unpack(out, "out", 0);
auto tensors_ = unpack(tensors, "tensors", 1);
@@ -10177,10 +9644,6 @@
}
return out;
}
-Tensor & cat_out_names_out(Tensor & out, TensorList tensors, Dimname dim) {
- TypeDefault::cat_out_names_out(out, tensors, dim);
- return out;
-}
Tensor & cauchy_(Tensor & self, double median, double sigma, c10::optional<Generator> generator) {
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -10210,10 +9673,6 @@
}
return self;
}
-Tensor cdist(const Tensor & x1, const Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
- auto result = TypeDefault::cdist(x1, x2, p, compute_mode);
- return result;
-}
Tensor ceil(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CeilBackward> grad_fn;
@@ -10374,10 +9833,6 @@
}
return self;
}
-Tensor chain_matmul(TensorList matrices) {
- auto result = TypeDefault::chain_matmul(matrices);
- return result;
-}
Tensor channel_shuffle(const Tensor & self, int64_t groups) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<NotImplemented> grad_fn;
@@ -10635,10 +10090,6 @@
}
return out;
}
-std::vector<Tensor> chunk(const Tensor & self, int64_t chunks, int64_t dim) {
- auto result = TypeDefault::chunk(self, chunks, dim);
- return result;
-}
Tensor clamp(const Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ClampBackward> grad_fn;
@@ -11136,10 +10587,6 @@
}
return out;
}
-Tensor combinations(const Tensor & self, int64_t r, bool with_replacement) {
- auto result = TypeDefault::combinations(self, r, with_replacement);
- return result;
-}
Tensor conj(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ConjBackward> grad_fn;
@@ -11235,22 +10682,6 @@
}
return result;
}
-Tensor contiguous(const Tensor & self, MemoryFormat memory_format) {
- auto result = TypeDefault::contiguous(self, memory_format);
- return result;
-}
-Tensor conv1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
- auto result = TypeDefault::conv1d(input, weight, bias, stride, padding, dilation, groups);
- return result;
-}
-Tensor conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
- auto result = TypeDefault::conv2d(input, weight, bias, stride, padding, dilation, groups);
- return result;
-}
-Tensor conv3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) {
- auto result = TypeDefault::conv3d(input, weight, bias, stride, padding, dilation, groups);
- return result;
-}
Tensor conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad) {
auto& self_ = unpack(self, "self", 0);
auto& weight_ = unpack(weight, "weight", 1);
@@ -11299,29 +10730,6 @@
}
return result;
}
-std::tuple<Tensor,Tensor,Tensor> conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- std::tie(result0, result1, result2) = TypeDefault::conv_tbc_backward(self, input, weight, bias, pad);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
-}
-Tensor conv_transpose1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) {
- auto result = TypeDefault::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation);
- return result;
-}
-Tensor conv_transpose2d_input(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) {
- auto result = TypeDefault::conv_transpose2d_input(input, weight, bias, stride, padding, output_padding, groups, dilation);
- return result;
-}
-Tensor conv_transpose3d_input(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) {
- auto result = TypeDefault::conv_transpose3d_input(input, weight, bias, stride, padding, output_padding, groups, dilation);
- return result;
-}
-Tensor convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) {
- auto result = TypeDefault::convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
- return result;
-}
std::tuple<Tensor,Tensor,Tensor> convolution_backward_overrideable(const Tensor & grad_output, const Tensor & input, const Tensor & weight, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, std::array<bool,3> output_mask) {
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& input_ = unpack(input, "input", 1);
@@ -11660,14 +11068,6 @@
}
return out;
}
-Tensor cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction) {
- auto result = TypeDefault::cosine_embedding_loss(input1, input2, target, margin, reduction);
- return result;
-}
-Tensor cosine_similarity(const Tensor & x1, const Tensor & x2, int64_t dim, double eps) {
- auto result = TypeDefault::cosine_similarity(x1, x2, dim, eps);
- return result;
-}
Tensor count_nonzero_dim_IntList(const Tensor & self, IntArrayRef dim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CountNonzeroBackward0> grad_fn;
@@ -11813,14 +11213,6 @@
}
return out;
}
-Tensor ctc_loss_IntList(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
- auto result = TypeDefault::ctc_loss_IntList(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
- return result;
-}
-Tensor ctc_loss_Tensor(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
- auto result = TypeDefault::ctc_loss_Tensor(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
- return result;
-}
Tensor cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
auto& theta_ = unpack(theta, "theta", 0);
std::shared_ptr<CudnnAffineGridGeneratorBackward> grad_fn;
@@ -12572,10 +11964,6 @@
}
return std::make_tuple(std::move(grad_self), std::move(grad_grid));
}
-bool cudnn_is_acceptable(const Tensor & self) {
- auto result = TypeDefault::cudnn_is_acceptable(self);
- return result;
-}
std::tuple<Tensor,Tensor> cummax(const Tensor & self, int64_t dim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CummaxBackward> grad_fn;
@@ -12611,12 +11999,6 @@
}
return std::make_tuple(std::move(values), std::move(indices));
}
-std::tuple<Tensor,Tensor> cummax_dimname(const Tensor & self, Dimname dim) {
- Tensor values;
- Tensor indices;
- std::tie(values, indices) = TypeDefault::cummax_dimname(self, dim);
- return std::make_tuple(std::move(values), std::move(indices));
-}
std::tuple<Tensor &,Tensor &> cummax_out_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim) {
auto& values_ = unpack(values, "values", 0);
auto& indices_ = unpack(indices, "indices", 1);
@@ -12664,10 +12046,6 @@
}
return std::forward_as_tuple(values, indices);
}
-std::tuple<Tensor &,Tensor &> cummax_out_dimname_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim) {
- TypeDefault::cummax_out_dimname_out(values, indices, self, dim);
- return std::forward_as_tuple(values, indices);
-}
std::tuple<Tensor,Tensor> cummin(const Tensor & self, int64_t dim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CumminBackward> grad_fn;
@@ -12703,12 +12081,6 @@
}
return std::make_tuple(std::move(values), std::move(indices));
}
-std::tuple<Tensor,Tensor> cummin_dimname(const Tensor & self, Dimname dim) {
- Tensor values;
- Tensor indices;
- std::tie(values, indices) = TypeDefault::cummin_dimname(self, dim);
- return std::make_tuple(std::move(values), std::move(indices));
-}
std::tuple<Tensor &,Tensor &> cummin_out_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim) {
auto& values_ = unpack(values, "values", 0);
auto& indices_ = unpack(indices, "indices", 1);
@@ -12756,10 +12128,6 @@
}
return std::forward_as_tuple(values, indices);
}
-std::tuple<Tensor &,Tensor &> cummin_out_dimname_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim) {
- TypeDefault::cummin_out_dimname_out(values, indices, self, dim);
- return std::forward_as_tuple(values, indices);
-}
Tensor cumprod(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CumprodBackward> grad_fn;
@@ -12791,10 +12159,6 @@
}
return result;
}
-Tensor cumprod_dimname(const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::cumprod_dimname(self, dim, dtype);
- return result;
-}
Tensor & cumprod_out_out(Tensor & out, const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -12833,10 +12197,6 @@
}
return out;
}
-Tensor & cumprod_out_dimname_out(Tensor & out, const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype) {
- TypeDefault::cumprod_out_dimname_out(out, self, dim, dtype);
- return out;
-}
Tensor cumsum(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<CumsumBackward> grad_fn;
@@ -12867,10 +12227,6 @@
}
return result;
}
-Tensor cumsum_dimname(const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::cumsum_dimname(self, dim, dtype);
- return result;
-}
Tensor & cumsum_out_out(Tensor & out, const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -12909,10 +12265,6 @@
}
return out;
}
-Tensor & cumsum_out_dimname_out(Tensor & out, const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype) {
- TypeDefault::cumsum_out_dimname_out(out, self, dim, dtype);
- return out;
-}
Tensor deg2rad(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<Deg2RadBackward> grad_fn;
@@ -13154,10 +12506,6 @@
}
return result;
}
-Tensor diag_embed(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
- auto result = TypeDefault::diag_embed(self, offset, dim1, dim2);
- return result;
-}
Tensor & diag_out_out(Tensor & out, const Tensor & self, int64_t diagonal) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -13196,10 +12544,6 @@
}
return out;
}
-Tensor diagflat(const Tensor & self, int64_t offset) {
- auto result = TypeDefault::diagflat(self, offset);
- return result;
-}
Tensor diagonal(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<DiagonalBackward> grad_fn;
@@ -13238,10 +12582,6 @@
}
return result;
}
-Tensor diagonal_Dimname(const Tensor & self, Dimname outdim, Dimname dim1, Dimname dim2, int64_t offset) {
- auto result = TypeDefault::diagonal_Dimname(self, outdim, dim1, dim2, offset);
- return result;
-}
Tensor digamma(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<DigammaBackward> grad_fn;
@@ -13655,14 +12995,6 @@
}
return out;
}
-Tensor dropout(const Tensor & input, double p, bool train) {
- auto result = TypeDefault::dropout(input, p, train);
- return result;
-}
-Tensor & dropout_(Tensor & self, double p, bool train) {
- TypeDefault::dropout_(self, p, train);
- return self;
-}
std::tuple<Tensor,Tensor> eig(const Tensor & self, bool eigenvectors) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<EigBackward> grad_fn;
@@ -13746,10 +13078,6 @@
}
return std::forward_as_tuple(e, v);
}
-Tensor einsum(std::string equation, TensorList tensors) {
- auto result = TypeDefault::einsum(equation, tensors);
- return result;
-}
Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<EluBackward> grad_fn;
@@ -13985,18 +13313,6 @@
}
return result;
}
-Tensor embedding_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) {
- auto result = TypeDefault::embedding_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
- return result;
-}
-std::tuple<Tensor,Tensor,Tensor,Tensor> embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights, bool include_last_offset) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- Tensor result3;
- std::tie(result0, result1, result2, result3) = TypeDefault::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2), std::move(result3));
-}
Tensor embedding_dense_backward(const Tensor & grad_output, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& indices_ = unpack(indices, "indices", 1);
@@ -14071,14 +13387,6 @@
}
return self;
}
-Tensor embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
- auto result = TypeDefault::embedding_sparse_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
- return result;
-}
-Tensor empty_names(IntArrayRef size, c10::optional<DimnameList> names, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::empty_names(size, names, options, memory_format);
- return result;
-}
Tensor empty_memory_format(IntArrayRef size, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
auto options_ = TensorOptions(options);
auto tmp = ([&]() {
@@ -14088,18 +13396,6 @@
auto result = std::move(tmp);
return result;
}
-Tensor empty_like(const Tensor & self, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::empty_like(self, options, memory_format);
- return result;
-}
-Tensor empty_meta(IntArrayRef size, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::empty_meta(size, options, memory_format);
- return result;
-}
-Tensor & empty_out_out(Tensor & out, IntArrayRef size, c10::optional<MemoryFormat> memory_format) {
- TypeDefault::empty_out_out(out, size, memory_format);
- return out;
-}
Tensor empty_quantized(IntArrayRef size, const Tensor & qtensor) {
auto& qtensor_ = unpack(qtensor, "qtensor", 1);
std::shared_ptr<NotImplemented> grad_fn;
@@ -14774,10 +14070,6 @@
}
return result;
}
-Tensor expand_as(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::expand_as(self, other);
- return result;
-}
Tensor expm1(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<Expm1Backward> grad_fn;
@@ -14908,14 +14200,6 @@
}
return self;
}
-Tensor eye(int64_t n, const TensorOptions & options) {
- auto result = TypeDefault::eye(n, options);
- return result;
-}
-Tensor eye_m(int64_t n, int64_t m, const TensorOptions & options) {
- auto result = TypeDefault::eye_m(n, m, options);
- return result;
-}
Tensor & eye_out_out(Tensor & out, int64_t n) {
auto& out_ = unpack(out, "out", 0);
#ifndef NDEBUG
@@ -15008,10 +14292,6 @@
}
return result;
}
-Tensor fake_quantize_per_channel_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
- auto result = TypeDefault::fake_quantize_per_channel_affine_backward(grad, self, scale, zero_point, axis, quant_min, quant_max);
- return result;
-}
Tensor fake_quantize_per_tensor_affine(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<FakeQuantizePerTensorAffineBackward> grad_fn;
@@ -15045,66 +14325,6 @@
}
return result;
}
-Tensor fake_quantize_per_tensor_affine_backward(const Tensor & grad, const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
- auto result = TypeDefault::fake_quantize_per_tensor_affine_backward(grad, self, scale, zero_point, quant_min, quant_max);
- return result;
-}
-Tensor fbgemm_linear_fp16_weight(const Tensor & input, const Tensor & packed_weight, const Tensor & bias) {
- auto result = TypeDefault::fbgemm_linear_fp16_weight(input, packed_weight, bias);
- return result;
-}
-Tensor fbgemm_linear_fp16_weight_fp32_activation(const Tensor & input, const Tensor & packed_weight, const Tensor & bias) {
- auto result = TypeDefault::fbgemm_linear_fp16_weight_fp32_activation(input, packed_weight, bias);
- return result;
-}
-Tensor fbgemm_linear_int8_weight(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias) {
- auto result = TypeDefault::fbgemm_linear_int8_weight(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
- return result;
-}
-Tensor fbgemm_linear_int8_weight_fp32_activation(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias) {
- auto result = TypeDefault::fbgemm_linear_int8_weight_fp32_activation(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
- return result;
-}
-std::tuple<Tensor,Tensor,double,int64_t> fbgemm_linear_quantize_weight(const Tensor & input) {
- Tensor result0;
- Tensor result1;
- double result2;
- int64_t result3;
- std::tie(result0, result1, result2, result3) = TypeDefault::fbgemm_linear_quantize_weight(input);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2), std::move(result3));
-}
-Tensor fbgemm_pack_gemm_matrix_fp16(const Tensor & input) {
- auto result = TypeDefault::fbgemm_pack_gemm_matrix_fp16(input);
- return result;
-}
-Tensor fbgemm_pack_quantized_matrix(const Tensor & input) {
- auto result = TypeDefault::fbgemm_pack_quantized_matrix(input);
- return result;
-}
-Tensor fbgemm_pack_quantized_matrix_KN(const Tensor & input, int64_t K, int64_t N) {
- auto result = TypeDefault::fbgemm_pack_quantized_matrix_KN(input, K, N);
- return result;
-}
-Tensor feature_alpha_dropout(const Tensor & input, double p, bool train) {
- auto result = TypeDefault::feature_alpha_dropout(input, p, train);
- return result;
-}
-Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train) {
- TypeDefault::feature_alpha_dropout_(self, p, train);
- return self;
-}
-Tensor feature_dropout(const Tensor & input, double p, bool train) {
- auto result = TypeDefault::feature_dropout(input, p, train);
- return result;
-}
-Tensor & feature_dropout_(Tensor & self, double p, bool train) {
- TypeDefault::feature_dropout_(self, p, train);
- return self;
-}
-Tensor fft(const Tensor & self, int64_t signal_ndim, bool normalized) {
- auto result = TypeDefault::fft(self, signal_ndim, normalized);
- return result;
-}
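// Toy model of that fallthrough, for illustration only; the names below
// (ToyDispatcher, Kernel) are assumptions, not PyTorch's real dispatcher
// API. It shows why a stub that merely forwards, like the deleted fft and
// feature_* wrappers above, is dead code once lookup can fall back on its
// own.
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

using Kernel = std::function<int(int)>;

struct ToyDispatcher {
  std::unordered_map<std::string, Kernel> autograd;  // explicit overrides
  std::unordered_map<std::string, Kernel> fallback;  // TypeDefault analogue
  int call(const std::string& op, int x) const {
    auto it = autograd.find(op);
    // No autograd kernel registered: fall through to the default kernel,
    // which is exactly what the deleted forwarding wrappers did by hand.
    return it != autograd.end() ? it->second(x) : fallback.at(op)(x);
  }
};

int main() {
  ToyDispatcher d;
  d.fallback["abs"] = [](int x) { return x < 0 ? -x : x; };
  std::cout << d.call("abs", -3) << "\n";  // prints 3 via fallthrough
}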
Tensor & fill__Scalar(Tensor & self, Scalar value) {
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -15171,26 +14391,6 @@
}
return self;
}
-Tensor & fill_diagonal_(Tensor & self, Scalar fill_value, bool wrap) {
- TypeDefault::fill_diagonal_(self, fill_value, wrap);
- return self;
-}
-Tensor flatten_using_ints(const Tensor & self, int64_t start_dim, int64_t end_dim) {
- auto result = TypeDefault::flatten_using_ints(self, start_dim, end_dim);
- return result;
-}
-Tensor flatten_named_out_dim(const Tensor & self, int64_t start_dim, int64_t end_dim, Dimname out_dim) {
- auto result = TypeDefault::flatten_named_out_dim(self, start_dim, end_dim, out_dim);
- return result;
-}
-Tensor flatten_using_names(const Tensor & self, Dimname start_dim, Dimname end_dim, Dimname out_dim) {
- auto result = TypeDefault::flatten_using_names(self, start_dim, end_dim, out_dim);
- return result;
-}
-Tensor flatten_DimnameList(const Tensor & self, DimnameList dims, Dimname out_dim) {
- auto result = TypeDefault::flatten_DimnameList(self, dims, out_dim);
- return result;
-}
Tensor flip(const Tensor & self, IntArrayRef dims) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<FlipBackward> grad_fn;
@@ -15220,14 +14420,6 @@
}
return result;
}
-Tensor fliplr(const Tensor & self) {
- auto result = TypeDefault::fliplr(self);
- return result;
-}
-Tensor flipud(const Tensor & self) {
- auto result = TypeDefault::flipud(self);
- return result;
-}
Tensor floor(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<FloorBackward> grad_fn;
@@ -15321,10 +14513,6 @@
}
return result;
}
-Tensor floor_divide_Scalar(const Tensor & self, Scalar other) {
- auto result = TypeDefault::floor_divide_Scalar(self, other);
- return result;
-}
Tensor & floor_divide__Tensor(Tensor & self, const Tensor & other) {
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
@@ -15362,10 +14550,6 @@
}
return self;
}
-Tensor & floor_divide__Scalar(Tensor & self, Scalar other) {
- TypeDefault::floor_divide__Scalar(self, other);
- return self;
-}
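// Note the contrast within floor_divide: the Tensor-Tensor in-place
// overload above keeps its full wrapper (unpack, NDEBUG checks, version
// increment), while the Scalar overload was a pure TypeDefault forwarder
// and is dropped.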
Tensor & floor_divide_out_out(Tensor & out, const Tensor & self, const Tensor & other) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -16167,18 +15351,6 @@
}
return std::forward_as_tuple(output, indices);
}
-Tensor frobenius_norm(const Tensor & self) {
- auto result = TypeDefault::frobenius_norm(self);
- return result;
-}
-Tensor frobenius_norm_dim(const Tensor & self, IntArrayRef dim, bool keepdim) {
- auto result = TypeDefault::frobenius_norm_dim(self, dim, keepdim);
- return result;
-}
-Tensor & frobenius_norm_out_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim) {
- TypeDefault::frobenius_norm_out_out(out, self, dim, keepdim);
- return out;
-}
Tensor from_file(std::string filename, c10::optional<bool> shared, c10::optional<int64_t> size, const TensorOptions & options) {
auto options_ = TensorOptions(options);
auto tmp = ([&]() {
@@ -16188,22 +15360,6 @@
auto result = std::move(tmp);
return result;
}
-Tensor full_names(IntArrayRef size, Scalar fill_value, c10::optional<DimnameList> names, const TensorOptions & options) {
- auto result = TypeDefault::full_names(size, fill_value, names, options);
- return result;
-}
-Tensor full(IntArrayRef size, Scalar fill_value, const TensorOptions & options) {
- auto result = TypeDefault::full(size, fill_value, options);
- return result;
-}
-Tensor full_like(const Tensor & self, Scalar fill_value, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::full_like(self, fill_value, options, memory_format);
- return result;
-}
-Tensor & full_out_out(Tensor & out, IntArrayRef size, Scalar fill_value) {
- TypeDefault::full_out_out(out, size, fill_value);
- return out;
-}
Tensor gather(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad) {
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack(index, "index", 2);
@@ -16245,10 +15401,6 @@
}
return result;
}
-Tensor gather_dimname(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad) {
- auto result = TypeDefault::gather_dimname(self, dim, index, sparse_grad);
- return result;
-}
Tensor & gather_out_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -16295,22 +15447,6 @@
}
return out;
}
-Tensor & gather_out_dimname_out(Tensor & out, const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad) {
- TypeDefault::gather_out_dimname_out(out, self, dim, index, sparse_grad);
- return out;
-}
-Tensor gcd(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::gcd(self, other);
- return result;
-}
-Tensor & gcd_(Tensor & self, const Tensor & other) {
- TypeDefault::gcd_(self, other);
- return self;
-}
-Tensor & gcd_out_out(Tensor & out, const Tensor & self, const Tensor & other) {
- TypeDefault::gcd_out_out(out, self, other);
- return out;
-}
Tensor ge_Scalar(const Tensor & self, Scalar other) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -16904,10 +16040,6 @@
}
return out;
}
-Tensor grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
- auto result = TypeDefault::grid_sampler(input, grid, interpolation_mode, padding_mode, align_corners);
- return result;
-}
Tensor grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
auto& input_ = unpack(input, "input", 0);
auto& grid_ = unpack(grid, "grid", 1);
@@ -17082,26 +16214,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1));
}
-Tensor group_norm(const Tensor & input, int64_t num_groups, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enabled) {
- auto result = TypeDefault::group_norm(input, num_groups, weight, bias, eps, cudnn_enabled);
- return result;
-}
-std::tuple<Tensor,Tensor> gru_input(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::gru_input(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
-std::tuple<Tensor,Tensor> gru_data(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::gru_data(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
-Tensor gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) {
- auto result = TypeDefault::gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
- return result;
-}
Tensor gt_Scalar(const Tensor & self, Scalar other) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -17283,30 +16395,6 @@
increment_version(out);
return out;
}
-Tensor hamming_window(int64_t window_length, const TensorOptions & options) {
- auto result = TypeDefault::hamming_window(window_length, options);
- return result;
-}
-Tensor hamming_window_periodic(int64_t window_length, bool periodic, const TensorOptions & options) {
- auto result = TypeDefault::hamming_window_periodic(window_length, periodic, options);
- return result;
-}
-Tensor hamming_window_periodic_alpha(int64_t window_length, bool periodic, double alpha, const TensorOptions & options) {
- auto result = TypeDefault::hamming_window_periodic_alpha(window_length, periodic, alpha, options);
- return result;
-}
-Tensor hamming_window_periodic_alpha_beta(int64_t window_length, bool periodic, double alpha, double beta, const TensorOptions & options) {
- auto result = TypeDefault::hamming_window_periodic_alpha_beta(window_length, periodic, alpha, beta, options);
- return result;
-}
-Tensor hann_window(int64_t window_length, const TensorOptions & options) {
- auto result = TypeDefault::hann_window(window_length, options);
- return result;
-}
-Tensor hann_window_periodic(int64_t window_length, bool periodic, const TensorOptions & options) {
- auto result = TypeDefault::hann_window_periodic(window_length, periodic, options);
- return result;
-}
Tensor hardshrink(const Tensor & self, Scalar lambd) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<HardshrinkBackward> grad_fn;
@@ -17829,10 +16917,6 @@
}
return out;
}
-Tensor hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin, int64_t reduction) {
- auto result = TypeDefault::hinge_embedding_loss(self, target, margin, reduction);
- return result;
-}
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<HistcBackward> grad_fn;
@@ -17981,10 +17065,6 @@
}
return out;
}
-Tensor ifft(const Tensor & self, int64_t signal_ndim, bool normalized) {
- auto result = TypeDefault::ifft(self, signal_ndim, normalized);
- return result;
-}
Tensor im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<Im2ColBackward> grad_fn;
@@ -18127,10 +17207,6 @@
}
return out;
}
-Tensor imag(const Tensor & self) {
- auto result = TypeDefault::imag(self);
- return result;
-}
Tensor index_Tensor(const Tensor & self, TensorList indices) {
auto& self_ = unpack(self, "self", 0);
auto indices_ = unpack(indices, "indices", 1);
@@ -18178,14 +17254,6 @@
}
return result;
}
-Tensor index_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
- auto result = TypeDefault::index_add(self, dim, index, source);
- return result;
-}
-Tensor index_add_dimname(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source) {
- auto result = TypeDefault::index_add_dimname(self, dim, index, source);
- return result;
-}
Tensor & index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack(index, "index", 2);
@@ -18238,14 +17306,6 @@
}
return self;
}
-Tensor index_copy(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
- auto result = TypeDefault::index_copy(self, dim, index, source);
- return result;
-}
-Tensor index_copy_dimname(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source) {
- auto result = TypeDefault::index_copy_dimname(self, dim, index, source);
- return result;
-}
Tensor & index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack(index, "index", 2);
@@ -18296,26 +17356,6 @@
}
return self;
}
-Tensor & index_copy__dimname(Tensor & self, Dimname dim, const Tensor & index, const Tensor & source) {
- TypeDefault::index_copy__dimname(self, dim, index, source);
- return self;
-}
-Tensor index_fill_int_Scalar(const Tensor & self, int64_t dim, const Tensor & index, Scalar value) {
- auto result = TypeDefault::index_fill_int_Scalar(self, dim, index, value);
- return result;
-}
-Tensor index_fill_int_Tensor(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value) {
- auto result = TypeDefault::index_fill_int_Tensor(self, dim, index, value);
- return result;
-}
-Tensor index_fill_Dimname_Scalar(const Tensor & self, Dimname dim, const Tensor & index, Scalar value) {
- auto result = TypeDefault::index_fill_Dimname_Scalar(self, dim, index, value);
- return result;
-}
-Tensor index_fill_Dimname_Tensor(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & value) {
- auto result = TypeDefault::index_fill_Dimname_Tensor(self, dim, index, value);
- return result;
-}
Tensor & index_fill__int_Scalar(Tensor & self, int64_t dim, const Tensor & index, Scalar value) {
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack(index, "index", 2);
@@ -18402,18 +17442,6 @@
}
return self;
}
-Tensor & index_fill__Dimname_Scalar(Tensor & self, Dimname dim, const Tensor & index, Scalar value) {
- TypeDefault::index_fill__Dimname_Scalar(self, dim, index, value);
- return self;
-}
-Tensor & index_fill__Dimname_Tensor(Tensor & self, Dimname dim, const Tensor & index, const Tensor & value) {
- TypeDefault::index_fill__Dimname_Tensor(self, dim, index, value);
- return self;
-}
-Tensor index_put(const Tensor & self, TensorList indices, const Tensor & values, bool accumulate) {
- auto result = TypeDefault::index_put(self, indices, values, accumulate);
- return result;
-}
Tensor & index_put_(Tensor & self, TensorList indices, const Tensor & values, bool accumulate) {
auto& self_ = unpack(self, "self", 0);
auto indices_ = unpack(indices, "indices", 1);
@@ -18510,10 +17538,6 @@
}
return result;
}
-Tensor index_select_dimname(const Tensor & self, Dimname dim, const Tensor & index) {
- auto result = TypeDefault::index_select_dimname(self, dim, index);
- return result;
-}
Tensor & index_select_out_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -18560,10 +17584,6 @@
}
return out;
}
-Tensor & index_select_out_dimname_out(Tensor & out, const Tensor & self, Dimname dim, const Tensor & index) {
- TypeDefault::index_select_out_dimname_out(out, self, dim, index);
- return out;
-}
Tensor indices(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -18584,10 +17604,6 @@
#endif
return result;
}
-Tensor instance_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
- auto result = TypeDefault::instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
- return result;
-}
Tensor int_repr(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<NotImplemented> grad_fn;
@@ -18685,10 +17701,6 @@
}
return out;
}
-Tensor irfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) {
- auto result = TypeDefault::irfft(self, signal_ndim, normalized, onesided, signal_sizes);
- return result;
-}
bool is_coalesced(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -18709,30 +17721,6 @@
#endif
return result;
}
-bool is_complex(const Tensor & self) {
- auto result = TypeDefault::is_complex(self);
- return result;
-}
-bool is_distributed(const Tensor & self) {
- auto result = TypeDefault::is_distributed(self);
- return result;
-}
-bool is_floating_point(const Tensor & self) {
- auto result = TypeDefault::is_floating_point(self);
- return result;
-}
-bool is_nonzero(const Tensor & self) {
- auto result = TypeDefault::is_nonzero(self);
- return result;
-}
-bool is_pinned(const Tensor & self) {
- auto result = TypeDefault::is_pinned(self);
- return result;
-}
-bool is_same_size(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::is_same_size(self, other);
- return result;
-}
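// The is_* predicates removed here (is_complex, is_distributed,
// is_floating_point, is_nonzero, is_pinned, is_same_size) return plain
// bools and never touch autograd state, so their generated wrappers were
// forwarders as well.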
bool is_set_to(const Tensor & self, const Tensor & tensor) {
auto& self_ = unpack(self, "self", 0);
auto& tensor_ = unpack(tensor, "tensor", 1);
@@ -18761,26 +17749,6 @@
#endif
return result;
}
-bool is_signed(const Tensor & self) {
- auto result = TypeDefault::is_signed(self);
- return result;
-}
-bool is_vulkan_available() {
- auto result = TypeDefault::is_vulkan_available();
- return result;
-}
-Tensor isclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan) {
- auto result = TypeDefault::isclose(self, other, rtol, atol, equal_nan);
- return result;
-}
-Tensor isfinite(const Tensor & self) {
- auto result = TypeDefault::isfinite(self);
- return result;
-}
-Tensor isinf(const Tensor & self) {
- auto result = TypeDefault::isinf(self);
- return result;
-}
Tensor isnan(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -18801,10 +17769,6 @@
#endif
return result;
}
-Tensor isneginf(const Tensor & self) {
- auto result = TypeDefault::isneginf(self);
- return result;
-}
Tensor & isneginf_out_out(Tensor & out, const Tensor & self) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -18843,10 +17807,6 @@
}
return out;
}
-Tensor isposinf(const Tensor & self) {
- auto result = TypeDefault::isposinf(self);
- return result;
-}
Tensor & isposinf_out_out(Tensor & out, const Tensor & self) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -18885,18 +17845,6 @@
}
return out;
}
-Tensor isreal(const Tensor & self) {
- auto result = TypeDefault::isreal(self);
- return result;
-}
-Tensor istft(const Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const Tensor & window, bool center, bool normalized, bool onesided, c10::optional<int64_t> length) {
- auto result = TypeDefault::istft(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length);
- return result;
-}
-Scalar item(const Tensor & self) {
- auto result = TypeDefault::item(self);
- return result;
-}
Tensor kl_div(const Tensor & self, const Tensor & target, int64_t reduction, bool log_target) {
auto& self_ = unpack(self, "self", 0);
auto& target_ = unpack(target, "target", 1);
@@ -19021,12 +17969,6 @@
}
return std::make_tuple(std::move(values), std::move(indices));
}
-std::tuple<Tensor,Tensor> kthvalue_dimname(const Tensor & self, int64_t k, Dimname dim, bool keepdim) {
- Tensor values;
- Tensor indices;
- std::tie(values, indices) = TypeDefault::kthvalue_dimname(self, k, dim, keepdim);
- return std::make_tuple(std::move(values), std::move(indices));
-}
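// Multi-output stubs like kthvalue_dimname follow the same pattern with
// extra ceremony: std::tie into locals, then std::make_tuple(std::move(...))
// straight back out; still no autograd bookkeeping.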
std::tuple<Tensor &,Tensor &> kthvalue_out_values(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool keepdim) {
auto& values_ = unpack(values, "values", 0);
auto& indices_ = unpack(indices, "indices", 1);
@@ -19074,10 +18016,6 @@
}
return std::forward_as_tuple(values, indices);
}
-std::tuple<Tensor &,Tensor &> kthvalue_out_dimname_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, Dimname dim, bool keepdim) {
- TypeDefault::kthvalue_out_dimname_out(values, indices, self, k, dim, keepdim);
- return std::forward_as_tuple(values, indices);
-}
Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) {
auto& self_ = unpack(self, "self", 0);
auto& target_ = unpack(target, "target", 1);
@@ -19266,22 +18204,6 @@
}
return out;
}
-Tensor layer_norm(const Tensor & input, IntArrayRef normalized_shape, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enable) {
- auto result = TypeDefault::layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enable);
- return result;
-}
-Tensor lcm(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::lcm(self, other);
- return result;
-}
-Tensor & lcm_(Tensor & self, const Tensor & other) {
- TypeDefault::lcm_(self, other);
- return self;
-}
-Tensor & lcm_out_out(Tensor & out, const Tensor & self, const Tensor & other) {
- TypeDefault::lcm_out_out(out, self, other);
- return out;
-}
Tensor le_Scalar(const Tensor & self, Scalar other) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -19977,14 +18899,6 @@
}
return out;
}
-Tensor linear(const Tensor & input, const Tensor & weight, const Tensor & bias) {
- auto result = TypeDefault::linear(input, weight, bias);
- return result;
-}
-Tensor linspace(Scalar start, Scalar end, int64_t steps, const TensorOptions & options) {
- auto result = TypeDefault::linspace(start, end, steps, options);
- return result;
-}
Tensor & linspace_out_out(Tensor & out, Scalar start, Scalar end, int64_t steps) {
auto& out_ = unpack(out, "out", 0);
#ifndef NDEBUG
@@ -20422,10 +19336,6 @@
}
return out;
}
-Tensor log_sigmoid(const Tensor & self) {
- auto result = TypeDefault::log_sigmoid(self);
- return result;
-}
Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) {
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
@@ -20609,18 +19519,6 @@
}
return std::forward_as_tuple(output, buffer);
}
-Tensor & log_sigmoid_out_out(Tensor & out, const Tensor & self) {
- TypeDefault::log_sigmoid_out_out(out, self);
- return out;
-}
-Tensor log_softmax_int(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::log_softmax_int(self, dim, dtype);
- return result;
-}
-Tensor log_softmax_Dimname(const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::log_softmax_Dimname(self, dim, dtype);
- return result;
-}
Tensor logaddexp(const Tensor & self, const Tensor & other) {
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
@@ -20822,10 +19720,6 @@
}
return result;
}
-Tensor logcumsumexp_dimname(const Tensor & self, Dimname dim) {
- auto result = TypeDefault::logcumsumexp_dimname(self, dim);
- return result;
-}
Tensor & logcumsumexp_out_out(Tensor & out, const Tensor & self, int64_t dim) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -20864,10 +19758,6 @@
}
return out;
}
-Tensor & logcumsumexp_out_dimname_out(Tensor & out, const Tensor & self, Dimname dim) {
- TypeDefault::logcumsumexp_out_dimname_out(out, self, dim);
- return out;
-}
Tensor logdet(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<LogdetBackward> grad_fn;
@@ -20900,14 +19790,6 @@
}
return result;
}
-Tensor logical_and(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::logical_and(self, other);
- return result;
-}
-Tensor & logical_and_(Tensor & self, const Tensor & other) {
- TypeDefault::logical_and_(self, other);
- return self;
-}
Tensor & logical_and_out_out(Tensor & out, const Tensor & self, const Tensor & other) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -20954,14 +19836,6 @@
}
return out;
}
-Tensor logical_not(const Tensor & self) {
- auto result = TypeDefault::logical_not(self);
- return result;
-}
-Tensor & logical_not_(Tensor & self) {
- TypeDefault::logical_not_(self);
- return self;
-}
Tensor & logical_not_out_out(Tensor & out, const Tensor & self) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -21000,14 +19874,6 @@
}
return out;
}
-Tensor logical_or(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::logical_or(self, other);
- return result;
-}
-Tensor & logical_or_(Tensor & self, const Tensor & other) {
- TypeDefault::logical_or_(self, other);
- return self;
-}
Tensor & logical_or_out_out(Tensor & out, const Tensor & self, const Tensor & other) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -21054,14 +19920,6 @@
}
return out;
}
-Tensor logical_xor(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::logical_xor(self, other);
- return result;
-}
-Tensor & logical_xor_(Tensor & self, const Tensor & other) {
- TypeDefault::logical_xor_(self, other);
- return self;
-}
Tensor & logical_xor_out_out(Tensor & out, const Tensor & self, const Tensor & other) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -21169,10 +20027,6 @@
}
return self;
}
-Tensor logit_backward(const Tensor & grad_output, const Tensor & self, c10::optional<double> eps) {
- auto result = TypeDefault::logit_backward(grad_output, self, eps);
- return result;
-}
Tensor & logit_backward_out_grad_input(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, c10::optional<double> eps) {
auto& grad_input_ = unpack(grad_input, "grad_input", 0);
auto& grad_output_ = unpack(grad_output, "grad_output", 1);
@@ -21257,10 +20111,6 @@
}
return out;
}
-Tensor logspace(Scalar start, Scalar end, int64_t steps, double base, const TensorOptions & options) {
- auto result = TypeDefault::logspace(start, end, steps, base, options);
- return result;
-}
Tensor & logspace_out_out(Tensor & out, Scalar start, Scalar end, int64_t steps, double base) {
auto& out_ = unpack(out, "out", 0);
#ifndef NDEBUG
@@ -21315,10 +20165,6 @@
}
return result;
}
-Tensor logsumexp_names(const Tensor & self, DimnameList dim, bool keepdim) {
- auto result = TypeDefault::logsumexp_names(self, dim, keepdim);
- return result;
-}
Tensor & logsumexp_out_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -21357,30 +20203,6 @@
}
return out;
}
-Tensor & logsumexp_out_names_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim) {
- TypeDefault::logsumexp_out_names_out(out, self, dim, keepdim);
- return out;
-}
-std::tuple<Tensor,Tensor,Tensor> lstm_input(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- std::tie(result0, result1, result2) = TypeDefault::lstm_input(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
-}
-std::tuple<Tensor,Tensor,Tensor> lstm_data(const Tensor & data, const Tensor & batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- Tensor result0;
- Tensor result1;
- Tensor result2;
- std::tie(result0, result1, result2) = TypeDefault::lstm_data(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
- return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
-}
-std::tuple<Tensor,Tensor> lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
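// The lstm/gru entry points (and their quantized cell variants later in
// the diff) are composite ops; presumably they decompose into
// differentiable primitives, so they never needed a dedicated VariableType
// kernel beyond this forwarding shim.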
std::tuple<Tensor,Tensor> lstsq(const Tensor & self, const Tensor & A) {
auto& self_ = unpack(self, "self", 0);
auto& A_ = unpack(A, "A", 1);
@@ -21755,18 +20577,6 @@
}
return out;
}
-Tensor margin_ranking_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction) {
- auto result = TypeDefault::margin_ranking_loss(input1, input2, target, margin, reduction);
- return result;
-}
-Tensor masked_fill_Scalar(const Tensor & self, const Tensor & mask, Scalar value) {
- auto result = TypeDefault::masked_fill_Scalar(self, mask, value);
- return result;
-}
-Tensor masked_fill_Tensor(const Tensor & self, const Tensor & mask, const Tensor & value) {
- auto result = TypeDefault::masked_fill_Tensor(self, mask, value);
- return result;
-}
Tensor & masked_fill__Scalar(Tensor & self, const Tensor & mask, Scalar value) {
auto& self_ = unpack(self, "self", 0);
auto& mask_ = unpack(mask, "mask", 1);
@@ -21851,10 +20661,6 @@
}
return self;
}
-Tensor masked_scatter(const Tensor & self, const Tensor & mask, const Tensor & source) {
- auto result = TypeDefault::masked_scatter(self, mask, source);
- return result;
-}
Tensor & masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) {
auto& self_ = unpack(self, "self", 0);
auto& mask_ = unpack(mask, "mask", 1);
@@ -21988,26 +20794,6 @@
}
return out;
}
-Tensor matmul(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::matmul(self, other);
- return result;
-}
-Tensor & matmul_out_out(Tensor & out, const Tensor & self, const Tensor & other) {
- TypeDefault::matmul_out_out(out, self, other);
- return out;
-}
-Tensor matrix_power(const Tensor & self, int64_t n) {
- auto result = TypeDefault::matrix_power(self, n);
- return result;
-}
-Tensor matrix_rank_tol(const Tensor & self, double tol, bool symmetric) {
- auto result = TypeDefault::matrix_rank_tol(self, tol, symmetric);
- return result;
-}
-Tensor matrix_rank(const Tensor & self, bool symmetric) {
- auto result = TypeDefault::matrix_rank(self, symmetric);
- return result;
-}
std::tuple<Tensor,Tensor> max_dim(const Tensor & self, int64_t dim, bool keepdim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MaxBackward0> grad_fn;
@@ -22044,12 +20830,6 @@
}
return std::make_tuple(std::move(values), std::move(indices));
}
-std::tuple<Tensor,Tensor> max_names_dim(const Tensor & self, Dimname dim, bool keepdim) {
- Tensor values;
- Tensor indices;
- std::tie(values, indices) = TypeDefault::max_names_dim(self, dim, keepdim);
- return std::make_tuple(std::move(values), std::move(indices));
-}
Tensor max_other(const Tensor & self, const Tensor & other) {
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
@@ -22167,10 +20947,6 @@
}
return std::forward_as_tuple(max, max_values);
}
-std::tuple<Tensor &,Tensor &> max_out_names_dim_max(Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim) {
- TypeDefault::max_out_names_dim_max(max, max_values, self, dim, keepdim);
- return std::forward_as_tuple(max, max_values);
-}
Tensor & max_out_out(Tensor & out, const Tensor & self, const Tensor & other) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -22217,20 +20993,6 @@
}
return out;
}
-Tensor max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
- auto result = TypeDefault::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode);
- return result;
-}
-std::tuple<Tensor,Tensor> max_pool1d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
-Tensor max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
- auto result = TypeDefault::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode);
- return result;
-}
std::tuple<Tensor,Tensor> max_pool2d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MaxPool2DWithIndicesBackward> grad_fn;
@@ -22417,10 +21179,6 @@
}
return std::forward_as_tuple(out, indices);
}
-Tensor max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
- auto result = TypeDefault::max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode);
- return result;
-}
std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MaxPool3DWithIndicesBackward> grad_fn;
@@ -22978,14 +21736,6 @@
}
return out;
}
-Tensor max_values(const Tensor & self, IntArrayRef dim, bool keepdim) {
- auto result = TypeDefault::max_values(self, dim, keepdim);
- return result;
-}
-Tensor max_values_names(const Tensor & self, DimnameList dim, bool keepdim) {
- auto result = TypeDefault::max_values_names(self, dim, keepdim);
- return result;
-}
Tensor mean(const Tensor & self, c10::optional<ScalarType> dtype) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MeanBackward0> grad_fn;
@@ -23049,10 +21799,6 @@
}
return result;
}
-Tensor mean_names_dim(const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::mean_names_dim(self, dim, keepdim, dtype);
- return result;
-}
Tensor & mean_out_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -23091,10 +21837,6 @@
}
return out;
}
-Tensor & mean_out_names_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype) {
- TypeDefault::mean_out_names_out(out, self, dim, keepdim, dtype);
- return out;
-}
std::tuple<Tensor,Tensor> median_dim(const Tensor & self, int64_t dim, bool keepdim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MedianBackward1> grad_fn;
@@ -23131,12 +21873,6 @@
}
return std::make_tuple(std::move(values), std::move(indices));
}
-std::tuple<Tensor,Tensor> median_names_dim(const Tensor & self, Dimname dim, bool keepdim) {
- Tensor values;
- Tensor indices;
- std::tie(values, indices) = TypeDefault::median_names_dim(self, dim, keepdim);
- return std::make_tuple(std::move(values), std::move(indices));
-}
Tensor median(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MedianBackward0> grad_fn;
@@ -23216,14 +21952,6 @@
}
return std::forward_as_tuple(values, indices);
}
-std::tuple<Tensor &,Tensor &> median_out_names_dim_values(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool keepdim) {
- TypeDefault::median_out_names_dim_values(values, indices, self, dim, keepdim);
- return std::forward_as_tuple(values, indices);
-}
-std::vector<Tensor> meshgrid(TensorList tensors) {
- auto result = TypeDefault::meshgrid(tensors);
- return result;
-}
std::tuple<Tensor,Tensor> min_dim(const Tensor & self, int64_t dim, bool keepdim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<MinBackward0> grad_fn;
@@ -23260,12 +21988,6 @@
}
return std::make_tuple(std::move(values), std::move(indices));
}
-std::tuple<Tensor,Tensor> min_names_dim(const Tensor & self, Dimname dim, bool keepdim) {
- Tensor values;
- Tensor indices;
- std::tie(values, indices) = TypeDefault::min_names_dim(self, dim, keepdim);
- return std::make_tuple(std::move(values), std::move(indices));
-}
Tensor min_other(const Tensor & self, const Tensor & other) {
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
@@ -23383,10 +22105,6 @@
}
return std::forward_as_tuple(min, min_indices);
}
-std::tuple<Tensor &,Tensor &> min_out_names_dim_min(Tensor & min, Tensor & min_indices, const Tensor & self, Dimname dim, bool keepdim) {
- TypeDefault::min_out_names_dim_min(min, min_indices, self, dim, keepdim);
- return std::forward_as_tuple(min, min_indices);
-}
Tensor & min_out_out(Tensor & out, const Tensor & self, const Tensor & other) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -23433,14 +22151,6 @@
}
return out;
}
-Tensor min_values(const Tensor & self, IntArrayRef dim, bool keepdim) {
- auto result = TypeDefault::min_values(self, dim, keepdim);
- return result;
-}
-Tensor min_values_names(const Tensor & self, DimnameList dim, bool keepdim) {
- auto result = TypeDefault::min_values_names(self, dim, keepdim);
- return result;
-}
std::tuple<Tensor,Tensor,Tensor> miopen_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon) {
auto& input_ = unpack(input, "input", 0);
auto& weight_ = unpack(weight, "weight", 1);
@@ -24520,16 +23230,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
}
-Tensor mkldnn_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
- auto result = TypeDefault::mkldnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, bias_defined);
- return result;
-}
-std::tuple<Tensor,Tensor> mkldnn_convolution_backward_weights(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::mkldnn_convolution_backward_weights(weight_size, grad_output, self, padding, stride, dilation, groups, bias_defined);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
Tensor mkldnn_linear(const Tensor & input, const Tensor & weight, const Tensor & bias) {
auto& input_ = unpack(input, "input", 0);
auto& weight_ = unpack(weight, "weight", 1);
@@ -24807,12 +23507,6 @@
}
return std::make_tuple(std::move(values), std::move(indices));
}
-std::tuple<Tensor,Tensor> mode_dimname(const Tensor & self, Dimname dim, bool keepdim) {
- Tensor values;
- Tensor indices;
- std::tie(values, indices) = TypeDefault::mode_dimname(self, dim, keepdim);
- return std::make_tuple(std::move(values), std::move(indices));
-}
std::tuple<Tensor &,Tensor &> mode_out_values(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) {
auto& values_ = unpack(values, "values", 0);
auto& indices_ = unpack(indices, "indices", 1);
@@ -24860,18 +23554,6 @@
}
return std::forward_as_tuple(values, indices);
}
-std::tuple<Tensor &,Tensor &> mode_out_dimname_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool keepdim) {
- TypeDefault::mode_out_dimname_out(values, indices, self, dim, keepdim);
- return std::forward_as_tuple(values, indices);
-}
-Tensor movedim_intlist(const Tensor & self, IntArrayRef source, IntArrayRef destination) {
- auto result = TypeDefault::movedim_intlist(self, source, destination);
- return result;
-}
-Tensor movedim_int(const Tensor & self, int64_t source, int64_t destination) {
- auto result = TypeDefault::movedim_int(self, source, destination);
- return result;
-}
Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) {
auto& self_ = unpack(self, "self", 0);
auto& target_ = unpack(target, "target", 1);
@@ -25470,10 +24152,6 @@
}
return out;
}
-Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) {
- auto result = TypeDefault::multilabel_margin_loss(self, target, reduction);
- return result;
-}
Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) {
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
@@ -25687,10 +24365,6 @@
}
return std::forward_as_tuple(output, is_target);
}
-Tensor & multilabel_margin_loss_out_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) {
- TypeDefault::multilabel_margin_loss_out_out(out, self, target, reduction);
- return out;
-}
Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement, c10::optional<Generator> generator) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -25888,14 +24562,6 @@
}
return self;
}
-Tensor narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length) {
- auto result = TypeDefault::narrow(self, dim, start, length);
- return result;
-}
-Tensor narrow_Tensor(const Tensor & self, int64_t dim, const Tensor & start, int64_t length) {
- auto result = TypeDefault::narrow_Tensor(self, dim, start, length);
- return result;
-}
Tensor narrow_copy(const Tensor & self, int64_t dim, int64_t start, int64_t length) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<NotImplemented> grad_fn;
@@ -26721,26 +25387,6 @@
}
return out;
}
-Tensor new_empty(const Tensor & self, IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::new_empty(self, size, options);
- return result;
-}
-Tensor new_full(const Tensor & self, IntArrayRef size, Scalar fill_value, const TensorOptions & options) {
- auto result = TypeDefault::new_full(self, size, fill_value, options);
- return result;
-}
-Tensor new_zeros(const Tensor & self, IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::new_zeros(self, size, options);
- return result;
-}
-Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
- auto result = TypeDefault::nll_loss(self, target, weight, reduction, ignore_index);
- return result;
-}
-Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
- auto result = TypeDefault::nll_loss2d(self, target, weight, reduction, ignore_index);
- return result;
-}
Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
@@ -26995,10 +25641,6 @@
}
return std::forward_as_tuple(output, total_weight);
}
-Tensor & nll_loss2d_out_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
- TypeDefault::nll_loss2d_out_out(out, self, target, weight, reduction, ignore_index);
- return out;
-}
Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
@@ -27253,10 +25895,6 @@
}
return std::forward_as_tuple(output, total_weight);
}
-Tensor & nll_loss_out_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
- TypeDefault::nll_loss_out_out(out, self, target, weight, reduction, ignore_index);
- return out;
-}
Tensor nonzero(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -27277,10 +25915,6 @@
#endif
return result;
}
-std::vector<Tensor> nonzero_numpy(const Tensor & self) {
- auto result = TypeDefault::nonzero_numpy(self);
- return result;
-}
Tensor & nonzero_out_out(Tensor & out, const Tensor & self) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -27445,18 +26079,6 @@
}
return result;
}
-Tensor norm_names_ScalarOpt_dim_dtype(const Tensor & self, c10::optional<Scalar> p, DimnameList dim, bool keepdim, ScalarType dtype) {
- auto result = TypeDefault::norm_names_ScalarOpt_dim_dtype(self, p, dim, keepdim, dtype);
- return result;
-}
-Tensor norm_names_ScalarOpt_dim(const Tensor & self, c10::optional<Scalar> p, DimnameList dim, bool keepdim) {
- auto result = TypeDefault::norm_names_ScalarOpt_dim(self, p, dim, keepdim);
- return result;
-}
-Tensor norm_except_dim(const Tensor & v, int64_t pow, int64_t dim) {
- auto result = TypeDefault::norm_except_dim(v, pow, dim);
- return result;
-}
Tensor & norm_out_dtype_out(Tensor & out, const Tensor & self, c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -27533,14 +26155,6 @@
}
return out;
}
-Tensor & norm_out_names_dtype_out(Tensor & out, const Tensor & self, c10::optional<Scalar> p, DimnameList dim, bool keepdim, ScalarType dtype) {
- TypeDefault::norm_out_names_dtype_out(out, self, p, dim, keepdim, dtype);
- return out;
-}
-Tensor & norm_out_names_out(Tensor & out, const Tensor & self, c10::optional<Scalar> p, DimnameList dim, bool keepdim) {
- TypeDefault::norm_out_names_out(out, self, p, dim, keepdim);
- return out;
-}
Tensor normal_Tensor_float(const Tensor & mean, double std, c10::optional<Generator> generator) {
auto& mean_ = unpack(mean, "mean", 0);
std::shared_ptr<NormalBackward1> grad_fn;
@@ -27637,10 +26251,6 @@
}
return result;
}
-Tensor normal_float_float(double mean, double std, IntArrayRef size, c10::optional<Generator> generator, const TensorOptions & options) {
- auto result = TypeDefault::normal_float_float(mean, std, size, generator, options);
- return result;
-}
Tensor & normal_(Tensor & self, double mean, double std, c10::optional<Generator> generator) {
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -27792,50 +26402,6 @@
}
return out;
}
-Tensor & normal_out_float_float_out(Tensor & out, double mean, double std, IntArrayRef size, c10::optional<Generator> generator) {
- TypeDefault::normal_out_float_float_out(out, mean, std, size, generator);
- return out;
-}
-Tensor nuclear_norm(const Tensor & self, bool keepdim) {
- auto result = TypeDefault::nuclear_norm(self, keepdim);
- return result;
-}
-Tensor nuclear_norm_dim(const Tensor & self, IntArrayRef dim, bool keepdim) {
- auto result = TypeDefault::nuclear_norm_dim(self, dim, keepdim);
- return result;
-}
-Tensor & nuclear_norm_out_out(Tensor & out, const Tensor & self, bool keepdim) {
- TypeDefault::nuclear_norm_out_out(out, self, keepdim);
- return out;
-}
-Tensor & nuclear_norm_out_dim_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim) {
- TypeDefault::nuclear_norm_out_dim_out(out, self, dim, keepdim);
- return out;
-}
-Tensor numpy_T(const Tensor & self) {
- auto result = TypeDefault::numpy_T(self);
- return result;
-}
-Tensor one_hot(const Tensor & self, int64_t num_classes) {
- auto result = TypeDefault::one_hot(self, num_classes);
- return result;
-}
-Tensor ones_names(IntArrayRef size, c10::optional<DimnameList> names, const TensorOptions & options) {
- auto result = TypeDefault::ones_names(size, names, options);
- return result;
-}
-Tensor ones(IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::ones(size, options);
- return result;
-}
-Tensor ones_like(const Tensor & self, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::ones_like(self, options, memory_format);
- return result;
-}
-Tensor & ones_out_out(Tensor & out, IntArrayRef size) {
- TypeDefault::ones_out_out(out, size);
- return out;
-}
Tensor orgqr(const Tensor & self, const Tensor & input2) {
auto& self_ = unpack(self, "self", 0);
auto& input2_ = unpack(input2, "input2", 1);
@@ -28016,14 +26582,6 @@
}
return out;
}
-Tensor pairwise_distance(const Tensor & x1, const Tensor & x2, double p, double eps, bool keepdim) {
- auto result = TypeDefault::pairwise_distance(x1, x2, p, eps, keepdim);
- return result;
-}
-Tensor pdist(const Tensor & self, double p) {
- auto result = TypeDefault::pdist(self, p);
- return result;
-}
Tensor permute(const Tensor & self, IntArrayRef dims) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<PermuteBackward> grad_fn;
@@ -28060,18 +26618,6 @@
}
return result;
}
-Tensor pin_memory(const Tensor & self) {
- auto result = TypeDefault::pin_memory(self);
- return result;
-}
-Tensor pinverse(const Tensor & self, double rcond) {
- auto result = TypeDefault::pinverse(self, rcond);
- return result;
-}
-Tensor pixel_shuffle(const Tensor & self, int64_t upscale_factor) {
- auto result = TypeDefault::pixel_shuffle(self, upscale_factor);
- return result;
-}
Tensor poisson(const Tensor & self, c10::optional<Generator> generator) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<PoissonBackward> grad_fn;
@@ -28101,10 +26647,6 @@
}
return result;
}
-Tensor poisson_nll_loss(const Tensor & input, const Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
- auto result = TypeDefault::poisson_nll_loss(input, target, log_input, full, eps, reduction);
- return result;
-}
Tensor polygamma(int64_t n, const Tensor & self) {
auto& self_ = unpack(self, "self", 1);
std::shared_ptr<PolygammaBackward> grad_fn;
@@ -28135,10 +26677,6 @@
}
return result;
}
-Tensor & polygamma_(Tensor & self, int64_t n) {
- TypeDefault::polygamma_(self, n);
- return self;
-}
Tensor & polygamma_out_out(Tensor & out, int64_t n, const Tensor & self) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 2);
@@ -28629,10 +27167,6 @@
}
return result;
}
-Tensor prod_dim_Dimname(const Tensor & self, Dimname dim, bool keepdim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::prod_dim_Dimname(self, dim, keepdim, dtype);
- return result;
-}
Tensor & prod_out_int_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim, c10::optional<ScalarType> dtype) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -28671,14 +27205,6 @@
}
return out;
}
-Tensor & prod_out_Dimname_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim, c10::optional<ScalarType> dtype) {
- TypeDefault::prod_out_Dimname_out(out, self, dim, keepdim, dtype);
- return out;
-}
-ScalarType promote_types(ScalarType type1, ScalarType type2) {
- auto result = TypeDefault::promote_types(type1, type2);
- return result;
-}
Tensor & put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate) {
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack(index, "index", 1);
@@ -29108,16 +27634,6 @@
}
return result;
}
-Tensor quantized_gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
- auto result = TypeDefault::quantized_gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- return result;
-}
-std::tuple<Tensor,Tensor> quantized_lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
Tensor quantized_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<NotImplemented> grad_fn;
@@ -29146,14 +27662,6 @@
}
return result;
}
-Tensor quantized_rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
- auto result = TypeDefault::quantized_rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- return result;
-}
-Tensor quantized_rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) {
- auto result = TypeDefault::quantized_rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
- return result;
-}
Tensor rad2deg(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<Rad2DegBackward> grad_fn;
@@ -29249,102 +27757,6 @@
}
return out;
}
-Tensor rand_names(IntArrayRef size, c10::optional<DimnameList> names, const TensorOptions & options) {
- auto result = TypeDefault::rand_names(size, names, options);
- return result;
-}
-Tensor rand_generator_with_names(IntArrayRef size, c10::optional<Generator> generator, c10::optional<DimnameList> names, const TensorOptions & options) {
- auto result = TypeDefault::rand_generator_with_names(size, generator, names, options);
- return result;
-}
-Tensor rand(IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::rand(size, options);
- return result;
-}
-Tensor rand_generator(IntArrayRef size, c10::optional<Generator> generator, const TensorOptions & options) {
- auto result = TypeDefault::rand_generator(size, generator, options);
- return result;
-}
-Tensor rand_like(const Tensor & self, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::rand_like(self, options, memory_format);
- return result;
-}
-Tensor & rand_out_out(Tensor & out, IntArrayRef size) {
- TypeDefault::rand_out_out(out, size);
- return out;
-}
-Tensor & rand_out_generator_out(Tensor & out, IntArrayRef size, c10::optional<Generator> generator) {
- TypeDefault::rand_out_generator_out(out, size, generator);
- return out;
-}
-Tensor randint(int64_t high, IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::randint(high, size, options);
- return result;
-}
-Tensor randint_generator(int64_t high, IntArrayRef size, c10::optional<Generator> generator, const TensorOptions & options) {
- auto result = TypeDefault::randint_generator(high, size, generator, options);
- return result;
-}
-Tensor randint_low(int64_t low, int64_t high, IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::randint_low(low, high, size, options);
- return result;
-}
-Tensor randint_low_generator(int64_t low, int64_t high, IntArrayRef size, c10::optional<Generator> generator, const TensorOptions & options) {
- auto result = TypeDefault::randint_low_generator(low, high, size, generator, options);
- return result;
-}
-Tensor randint_like(const Tensor & self, int64_t high, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::randint_like(self, high, options, memory_format);
- return result;
-}
-Tensor randint_like_low_dtype(const Tensor & self, int64_t low, int64_t high, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::randint_like_low_dtype(self, low, high, options, memory_format);
- return result;
-}
-Tensor & randint_out_out(Tensor & out, int64_t high, IntArrayRef size) {
- TypeDefault::randint_out_out(out, high, size);
- return out;
-}
-Tensor & randint_out_generator_out(Tensor & out, int64_t high, IntArrayRef size, c10::optional<Generator> generator) {
- TypeDefault::randint_out_generator_out(out, high, size, generator);
- return out;
-}
-Tensor & randint_out_low_out(Tensor & out, int64_t low, int64_t high, IntArrayRef size) {
- TypeDefault::randint_out_low_out(out, low, high, size);
- return out;
-}
-Tensor & randint_out_low_generator_out(Tensor & out, int64_t low, int64_t high, IntArrayRef size, c10::optional<Generator> generator) {
- TypeDefault::randint_out_low_generator_out(out, low, high, size, generator);
- return out;
-}
-Tensor randn(IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::randn(size, options);
- return result;
-}
-Tensor randn_generator(IntArrayRef size, c10::optional<Generator> generator, const TensorOptions & options) {
- auto result = TypeDefault::randn_generator(size, generator, options);
- return result;
-}
-Tensor randn_names(IntArrayRef size, c10::optional<DimnameList> names, const TensorOptions & options) {
- auto result = TypeDefault::randn_names(size, names, options);
- return result;
-}
-Tensor randn_generator_with_names(IntArrayRef size, c10::optional<Generator> generator, c10::optional<DimnameList> names, const TensorOptions & options) {
- auto result = TypeDefault::randn_generator_with_names(size, generator, names, options);
- return result;
-}
-Tensor randn_like(const Tensor & self, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::randn_like(self, options, memory_format);
- return result;
-}
-Tensor & randn_out_out(Tensor & out, IntArrayRef size) {
- TypeDefault::randn_out_out(out, size);
- return out;
-}
-Tensor & randn_out_generator_out(Tensor & out, IntArrayRef size, c10::optional<Generator> generator) {
- TypeDefault::randn_out_generator_out(out, size, generator);
- return out;
-}
Tensor & random__from(Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<Generator> generator) {
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -29432,18 +27844,6 @@
}
return self;
}
-Tensor randperm(int64_t n, const TensorOptions & options) {
- auto result = TypeDefault::randperm(n, options);
- return result;
-}
-Tensor randperm_generator(int64_t n, c10::optional<Generator> generator, const TensorOptions & options) {
- auto result = TypeDefault::randperm_generator(n, generator, options);
- return result;
-}
-Tensor & randperm_out_out(Tensor & out, int64_t n) {
- TypeDefault::randperm_out_out(out, n);
- return out;
-}
Tensor & randperm_out_generator_out(Tensor & out, int64_t n, c10::optional<Generator> generator) {
auto& out_ = unpack(out, "out", 0);
#ifndef NDEBUG
@@ -29464,14 +27864,6 @@
increment_version(out);
return out;
}
-Tensor range_step(Scalar start, Scalar end, Scalar step, const TensorOptions & options) {
- auto result = TypeDefault::range_step(start, end, step, options);
- return result;
-}
-Tensor range(Scalar start, Scalar end, const TensorOptions & options) {
- auto result = TypeDefault::range(start, end, options);
- return result;
-}
Tensor & range_out_out(Tensor & out, Scalar start, Scalar end, Scalar step) {
auto& out_ = unpack(out, "out", 0);
#ifndef NDEBUG
@@ -29492,10 +27884,6 @@
increment_version(out);
return out;
}
-Tensor real(const Tensor & self) {
- auto result = TypeDefault::real(self);
- return result;
-}
Tensor reciprocal(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ReciprocalBackward> grad_fn;
@@ -29597,10 +27985,6 @@
}
return out;
}
-Tensor refine_names(const Tensor & self, DimnameList names) {
- auto result = TypeDefault::refine_names(self, names);
- return result;
-}
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ReflectionPad1DBackward> grad_fn;
@@ -30182,14 +28566,6 @@
}
return out;
}
-Tensor rename(const Tensor & self, c10::optional<DimnameList> names) {
- auto result = TypeDefault::rename(self, names);
- return result;
-}
-Tensor & rename_(Tensor & self, c10::optional<DimnameList> names) {
- TypeDefault::rename_(self, names);
- return self;
-}
Tensor renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<RenormBackward> grad_fn;
@@ -30351,14 +28727,6 @@
}
return result;
}
-Tensor repeat_interleave_self_Tensor(const Tensor & self, const Tensor & repeats, c10::optional<int64_t> dim) {
- auto result = TypeDefault::repeat_interleave_self_Tensor(self, repeats, dim);
- return result;
-}
-Tensor repeat_interleave_self_int(const Tensor & self, int64_t repeats, c10::optional<int64_t> dim) {
- auto result = TypeDefault::repeat_interleave_self_int(self, repeats, dim);
- return result;
-}
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ReplicationPad1DBackward> grad_fn;
@@ -30815,66 +29183,6 @@
}
return out;
}
-Tensor reshape(const Tensor & self, IntArrayRef shape) {
- auto result = TypeDefault::reshape(self, shape);
- return result;
-}
-Tensor reshape_as(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::reshape_as(self, other);
- return result;
-}
-ScalarType result_type_Tensor(const Tensor & tensor, const Tensor & other) {
- auto result = TypeDefault::result_type_Tensor(tensor, other);
- return result;
-}
-ScalarType result_type_Scalar(const Tensor & tensor, Scalar other) {
- auto result = TypeDefault::result_type_Scalar(tensor, other);
- return result;
-}
-ScalarType result_type_Scalar_Tensor(Scalar scalar, const Tensor & tensor) {
- auto result = TypeDefault::result_type_Scalar_Tensor(scalar, tensor);
- return result;
-}
-ScalarType result_type_Scalar_Scalar(Scalar scalar1, Scalar scalar2) {
- auto result = TypeDefault::result_type_Scalar_Scalar(scalar1, scalar2);
- return result;
-}
-Tensor rfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided) {
- auto result = TypeDefault::rfft(self, signal_ndim, normalized, onesided);
- return result;
-}
-std::tuple<Tensor,Tensor> rnn_relu_input(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::rnn_relu_input(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
-std::tuple<Tensor,Tensor> rnn_relu_data(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::rnn_relu_data(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
-Tensor rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) {
- auto result = TypeDefault::rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
- return result;
-}
-std::tuple<Tensor,Tensor> rnn_tanh_input(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::rnn_tanh_input(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
-std::tuple<Tensor,Tensor> rnn_tanh_data(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::rnn_tanh_data(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
-Tensor rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) {
- auto result = TypeDefault::rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh);
- return result;
-}
Tensor roll(const Tensor & self, IntArrayRef shifts, IntArrayRef dims) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<RollBackward> grad_fn;
@@ -31030,14 +29338,6 @@
}
return out;
}
-Tensor rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, c10::optional<Generator> generator) {
- auto result = TypeDefault::rrelu(self, lower, upper, training, generator);
- return result;
-}
-Tensor & rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, c10::optional<Generator> generator) {
- TypeDefault::rrelu_(self, lower, upper, training, generator);
- return self;
-}
Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, c10::optional<Generator> generator) {
auto& self_ = unpack(self, "self", 0);
auto& noise_ = unpack(noise, "noise", 1);
@@ -31388,26 +29688,6 @@
}
return result;
}
-Tensor scalar_tensor(Scalar s, const TensorOptions & options) {
- auto result = TypeDefault::scalar_tensor(s, options);
- return result;
-}
-Tensor scatter_src(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) {
- auto result = TypeDefault::scatter_src(self, dim, index, src);
- return result;
-}
-Tensor scatter_value(const Tensor & self, int64_t dim, const Tensor & index, Scalar value) {
- auto result = TypeDefault::scatter_value(self, dim, index, value);
- return result;
-}
-Tensor scatter_dimname_src(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src) {
- auto result = TypeDefault::scatter_dimname_src(self, dim, index, src);
- return result;
-}
-Tensor scatter_dimname_value(const Tensor & self, Dimname dim, const Tensor & index, Scalar value) {
- auto result = TypeDefault::scatter_dimname_value(self, dim, index, value);
- return result;
-}
Tensor & scatter__src(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) {
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack(index, "index", 2);
@@ -31576,14 +29856,6 @@
}
return self;
}
-Tensor scatter_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) {
- auto result = TypeDefault::scatter_add(self, dim, index, src);
- return result;
-}
-Tensor scatter_add_dimname(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src) {
- auto result = TypeDefault::scatter_add_dimname(self, dim, index, src);
- return result;
-}
Tensor & scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) {
auto& self_ = unpack(self, "self", 0);
auto& index_ = unpack(index, "index", 2);
@@ -31743,10 +30015,6 @@
}
return out;
}
-Tensor select_Dimname(const Tensor & self, Dimname dim, int64_t index) {
- auto result = TypeDefault::select_Dimname(self, dim, index);
- return result;
-}
Tensor select_int(const Tensor & self, int64_t dim, int64_t index) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SelectBackward> grad_fn;
@@ -31784,14 +30052,6 @@
}
return result;
}
-Tensor selu(const Tensor & self) {
- auto result = TypeDefault::selu(self);
- return result;
-}
-Tensor & selu_(Tensor & self) {
- TypeDefault::selu_(self);
- return self;
-}
Tensor & set__source_Storage(Tensor & self, Storage source) {
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -32178,10 +30438,6 @@
}
return out;
}
-Tensor signbit(const Tensor & self) {
- auto result = TypeDefault::signbit(self);
- return result;
-}
Tensor & signbit_out_out(Tensor & out, const Tensor & self) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -32278,10 +30534,6 @@
}
return self;
}
-Tensor silu_backward(const Tensor & grad_output, const Tensor & self) {
- auto result = TypeDefault::silu_backward(grad_output, self);
- return result;
-}
Tensor & silu_out_out(Tensor & out, const Tensor & self) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -32514,14 +30766,6 @@
}
return out;
}
-int64_t size_int(const Tensor & self, int64_t dim) {
- auto result = TypeDefault::size_int(self, dim);
- return result;
-}
-int64_t size_Dimname(const Tensor & self, Dimname dim) {
- auto result = TypeDefault::size_Dimname(self, dim);
- return result;
-}
Tensor slice_Tensor(const Tensor & self, int64_t dim, int64_t start, int64_t end, int64_t step) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SliceBackward> grad_fn;
@@ -32596,10 +30840,6 @@
}
return std::make_tuple(std::move(sign), std::move(logabsdet));
}
-Tensor slow_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
- auto result = TypeDefault::slow_conv3d(self, weight, kernel_size, bias, stride, padding);
- return result;
-}
std::tuple<Tensor,Tensor,Tensor> slow_conv3d_backward_output_mask(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
@@ -32886,10 +31126,6 @@
}
return std::forward_as_tuple(output, finput, fgrad_input);
}
-Tensor & slow_conv3d_out_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
- TypeDefault::slow_conv3d_out_out(out, self, weight, kernel_size, bias, stride, padding);
- return out;
-}
Tensor slow_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
auto& self_ = unpack(self, "self", 0);
auto& weight_ = unpack(weight, "weight", 1);
@@ -33626,10 +31862,6 @@
}
return out;
}
-Tensor smm(const Tensor & self, const Tensor & mat2) {
- auto result = TypeDefault::smm(self, mat2);
- return result;
-}
Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) {
auto& self_ = unpack(self, "self", 0);
auto& target_ = unpack(target, "target", 1);
@@ -34008,14 +32240,6 @@
}
return out;
}
-Tensor softmax_int(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::softmax_int(self, dim, dtype);
- return result;
-}
-Tensor softmax_Dimname(const Tensor & self, Dimname dim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::softmax_Dimname(self, dim, dtype);
- return result;
-}
Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SoftplusBackward> grad_fn;
@@ -34476,12 +32700,6 @@
}
return std::make_tuple(std::move(values), std::move(indices));
}
-std::tuple<Tensor,Tensor> sort_dimname(const Tensor & self, Dimname dim, bool descending) {
- Tensor values;
- Tensor indices;
- std::tie(values, indices) = TypeDefault::sort_dimname(self, dim, descending);
- return std::make_tuple(std::move(values), std::move(indices));
-}
std::tuple<Tensor &,Tensor &> sort_out_values(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending) {
auto& values_ = unpack(values, "values", 0);
auto& indices_ = unpack(indices, "indices", 1);
@@ -34529,22 +32747,6 @@
}
return std::forward_as_tuple(values, indices);
}
-std::tuple<Tensor &,Tensor &> sort_out_dimname_values(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool descending) {
- TypeDefault::sort_out_dimname_values(values, indices, self, dim, descending);
- return std::forward_as_tuple(values, indices);
-}
-Tensor sparse_coo_tensor_size(IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::sparse_coo_tensor_size(size, options);
- return result;
-}
-Tensor sparse_coo_tensor_indices(const Tensor & indices, const Tensor & values, const TensorOptions & options) {
- auto result = TypeDefault::sparse_coo_tensor_indices(indices, values, options);
- return result;
-}
-Tensor sparse_coo_tensor_indices_size(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::sparse_coo_tensor_indices_size(indices, values, size, options);
- return result;
-}
int64_t sparse_dim(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
#ifndef NDEBUG
@@ -34825,14 +33027,6 @@
}
return out;
}
-Tensor square(const Tensor & self) {
- auto result = TypeDefault::square(self);
- return result;
-}
-Tensor & square_(Tensor & self) {
- TypeDefault::square_(self);
- return self;
-}
Tensor squeeze(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SqueezeBackward0> grad_fn;
@@ -34904,10 +33098,6 @@
}
return result;
}
-Tensor squeeze_dimname(const Tensor & self, Dimname dim) {
- auto result = TypeDefault::squeeze_dimname(self, dim);
- return result;
-}
Tensor & squeeze_(Tensor & self) {
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -34969,14 +33159,6 @@
}
return self;
}
-Tensor & squeeze__dimname(Tensor & self, Dimname dim) {
- TypeDefault::squeeze__dimname(self, dim);
- return self;
-}
-Tensor sspaddmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
- auto result = TypeDefault::sspaddmm(self, mat1, mat2, beta, alpha);
- return result;
-}
Tensor & sspaddmm_out_out(Tensor & out, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -35184,10 +33366,6 @@
}
return result;
}
-Tensor std_names_dim(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) {
- auto result = TypeDefault::std_names_dim(self, dim, unbiased, keepdim);
- return result;
-}
std::tuple<Tensor,Tensor> std_mean(const Tensor & self, bool unbiased) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<StdMeanBackward1> grad_fn;
@@ -35262,12 +33440,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1));
}
-std::tuple<Tensor,Tensor> std_mean_names_dim(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::std_mean_names_dim(self, dim, unbiased, keepdim);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
Tensor & std_out_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -35306,22 +33478,6 @@
}
return out;
}
-Tensor & std_out_names_out(Tensor & out, const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) {
- TypeDefault::std_out_names_out(out, self, dim, unbiased, keepdim);
- return out;
-}
-Tensor stft(const Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const Tensor & window, bool normalized, bool onesided) {
- auto result = TypeDefault::stft(self, n_fft, hop_length, win_length, window, normalized, onesided);
- return result;
-}
-int64_t stride_int(const Tensor & self, int64_t dim) {
- auto result = TypeDefault::stride_int(self, dim);
- return result;
-}
-int64_t stride_Dimname(const Tensor & self, Dimname dim) {
- auto result = TypeDefault::stride_Dimname(self, dim);
- return result;
-}
Tensor sub_Tensor(const Tensor & self, const Tensor & other, Scalar alpha) {
auto& self_ = unpack(self, "self", 0);
auto& other_ = unpack(other, "other", 1);
@@ -35560,10 +33716,6 @@
}
return result;
}
-Tensor sum_dim_DimnameList(const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype) {
- auto result = TypeDefault::sum_dim_DimnameList(self, dim, keepdim, dtype);
- return result;
-}
Tensor & sum_out_IntList_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -35602,14 +33754,6 @@
}
return out;
}
-Tensor & sum_out_DimnameList_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype) {
- TypeDefault::sum_out_DimnameList_out(out, self, dim, keepdim, dtype);
- return out;
-}
-Tensor sum_to_size(const Tensor & self, IntArrayRef size) {
- auto result = TypeDefault::sum_to_size(self, size);
- return result;
-}
std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self, bool some, bool compute_uv) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<SvdBackward> grad_fn;
@@ -36222,14 +34366,6 @@
}
return out;
}
-Tensor tensordot(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other) {
- auto result = TypeDefault::tensordot(self, other, dims_self, dims_other);
- return result;
-}
-Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
- auto result = TypeDefault::thnn_conv2d(self, weight, kernel_size, bias, stride, padding);
- return result;
-}
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward_output_mask(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
@@ -36516,14 +34652,6 @@
}
return std::forward_as_tuple(output, finput, fgrad_input);
}
-Tensor & thnn_conv2d_out_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
- TypeDefault::thnn_conv2d_out_out(out, self, weight, kernel_size, bias, stride, padding);
- return out;
-}
-Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
- auto result = TypeDefault::thnn_conv_depthwise2d(self, weight, kernel_size, bias, stride, padding, dilation);
- return result;
-}
std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward_output_mask(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) {
auto& grad_output_ = unpack(grad_output, "grad_output", 0);
auto& self_ = unpack(self, "self", 1);
@@ -36744,10 +34872,6 @@
}
return out;
}
-Tensor & thnn_conv_depthwise2d_out_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
- TypeDefault::thnn_conv_depthwise2d_out_out(out, self, weight, kernel_size, bias, stride, padding, dilation);
- return out;
-}
Tensor threshold(const Tensor & self, Scalar threshold, Scalar value) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ThresholdBackward0> grad_fn;
@@ -36887,22 +35011,6 @@
}
return out;
}
-Tensor to_dtype_layout(const Tensor & self, const TensorOptions & options, bool non_blocking, bool copy, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::to_dtype_layout(self, options, non_blocking, copy, memory_format);
- return result;
-}
-Tensor to_device(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::to_device(self, device, dtype, non_blocking, copy, memory_format);
- return result;
-}
-Tensor to_dtype(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::to_dtype(self, dtype, non_blocking, copy, memory_format);
- return result;
-}
-Tensor to_other(const Tensor & self, const Tensor & other, bool non_blocking, bool copy, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::to_other(self, other, non_blocking, copy, memory_format);
- return result;
-}
Tensor to_dense(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ToDenseBackward> grad_fn;
@@ -36932,10 +35040,6 @@
}
return result;
}
-Tensor to_dense_backward(const Tensor & grad, const Tensor & input) {
- auto result = TypeDefault::to_dense_backward(grad, input);
- return result;
-}
Tensor to_mkldnn(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ToMkldnnBackward> grad_fn;
@@ -36965,10 +35069,6 @@
}
return result;
}
-Tensor to_mkldnn_backward(const Tensor & grad, const Tensor & input) {
- auto result = TypeDefault::to_mkldnn_backward(grad, input);
- return result;
-}
Tensor to_sparse_sparse_dim(const Tensor & self, int64_t sparse_dim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<NotImplemented> grad_fn;
@@ -37172,10 +35272,6 @@
}
return result;
}
-Tensor transpose_Dimname(const Tensor & self, Dimname dim0, Dimname dim1) {
- auto result = TypeDefault::transpose_Dimname(self, dim0, dim1);
- return result;
-}
Tensor & transpose_(Tensor & self, int64_t dim0, int64_t dim1) {
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -37207,14 +35303,6 @@
}
return self;
}
-Tensor trapz_x(const Tensor & y, const Tensor & x, int64_t dim) {
- auto result = TypeDefault::trapz_x(y, x, dim);
- return result;
-}
-Tensor trapz_dx(const Tensor & y, double dx, int64_t dim) {
- auto result = TypeDefault::trapz_dx(y, dx, dim);
- return result;
-}
std::tuple<Tensor,Tensor> triangular_solve(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) {
auto& self_ = unpack(self, "self", 0);
auto& A_ = unpack(A, "A", 1);
@@ -37422,10 +35510,6 @@
}
return out;
}
-Tensor triplet_margin_loss(const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
- auto result = TypeDefault::triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction);
- return result;
-}
Tensor triu(const Tensor & self, int64_t diagonal) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<TriuBackward> grad_fn;
@@ -37813,10 +35897,6 @@
}
return out;
}
-Tensor type_as(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::type_as(self, other);
- return result;
-}
std::vector<Tensor> unbind_int(const Tensor & self, int64_t dim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UnbindBackward> grad_fn;
@@ -37846,18 +35926,6 @@
}
return result;
}
-std::vector<Tensor> unbind_Dimname(const Tensor & self, Dimname dim) {
- auto result = TypeDefault::unbind_Dimname(self, dim);
- return result;
-}
-Tensor unflatten_Dimname(const Tensor & self, Dimname dim, IntArrayRef sizes, DimnameList names) {
- auto result = TypeDefault::unflatten_Dimname(self, dim, sizes, names);
- return result;
-}
-Tensor unflatten_int(const Tensor & self, int64_t dim, IntArrayRef sizes, DimnameList names) {
- auto result = TypeDefault::unflatten_int(self, dim, sizes, names);
- return result;
-}
Tensor unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UnfoldBackward> grad_fn;
@@ -38049,10 +36117,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1), std::move(result2));
}
-std::vector<Tensor> unsafe_chunk(const Tensor & self, int64_t chunks, int64_t dim) {
- auto result = TypeDefault::unsafe_chunk(self, chunks, dim);
- return result;
-}
std::vector<Tensor> unsafe_split_Tensor(const Tensor & self, int64_t split_size, int64_t dim) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<UnsafeSplitBackward> grad_fn;
@@ -39634,10 +37698,6 @@
}
return result;
}
-Tensor vander(const Tensor & x, c10::optional<int64_t> N, bool increasing) {
- auto result = TypeDefault::vander(x, N, increasing);
- return result;
-}
Tensor var(const Tensor & self, bool unbiased) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<VarBackward0> grad_fn;
@@ -39700,10 +37760,6 @@
}
return result;
}
-Tensor var_names_dim(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) {
- auto result = TypeDefault::var_names_dim(self, dim, unbiased, keepdim);
- return result;
-}
std::tuple<Tensor,Tensor> var_mean(const Tensor & self, bool unbiased) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<VarMeanBackward1> grad_fn;
@@ -39778,12 +37834,6 @@
}
return std::make_tuple(std::move(result0), std::move(result1));
}
-std::tuple<Tensor,Tensor> var_mean_names_dim(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) {
- Tensor result0;
- Tensor result1;
- std::tie(result0, result1) = TypeDefault::var_mean_names_dim(self, dim, unbiased, keepdim);
- return std::make_tuple(std::move(result0), std::move(result1));
-}
Tensor & var_out_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) {
auto& out_ = unpack(out, "out", 0);
auto& self_ = unpack(self, "self", 1);
@@ -39822,10 +37872,6 @@
}
return out;
}
-Tensor & var_out_names_out(Tensor & out, const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) {
- TypeDefault::var_out_names_out(out, self, dim, unbiased, keepdim);
- return out;
-}
Tensor view(const Tensor & self, IntArrayRef size) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ViewBackward> grad_fn;
@@ -39862,10 +37908,6 @@
}
return result;
}
-Tensor view_as(const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::view_as(self, other);
- return result;
-}
Tensor view_as_complex(const Tensor & self) {
auto& self_ = unpack(self, "self", 0);
std::shared_ptr<ViewAsComplexBackward> grad_fn;
@@ -39934,26 +37976,6 @@
}
return result;
}
-Tensor where_self(const Tensor & condition, const Tensor & self, const Tensor & other) {
- auto result = TypeDefault::where_self(condition, self, other);
- return result;
-}
-Tensor where_ScalarSelf(const Tensor & condition, Scalar self, const Tensor & other) {
- auto result = TypeDefault::where_ScalarSelf(condition, self, other);
- return result;
-}
-Tensor where_ScalarOther(const Tensor & condition, const Tensor & self, Scalar other) {
- auto result = TypeDefault::where_ScalarOther(condition, self, other);
- return result;
-}
-Tensor where_Scalar(const Tensor & condition, Scalar self, Scalar other) {
- auto result = TypeDefault::where_Scalar(condition, self, other);
- return result;
-}
-std::vector<Tensor> where(const Tensor & condition) {
- auto result = TypeDefault::where(condition);
- return result;
-}
Tensor & zero_(Tensor & self) {
auto& self_ = unpack(self, "self", 0);
check_inplace(self);
@@ -39983,77 +38005,34 @@
}
return self;
}
-Tensor zeros_names(IntArrayRef size, c10::optional<DimnameList> names, const TensorOptions & options) {
- auto result = TypeDefault::zeros_names(size, names, options);
- return result;
-}
-Tensor zeros(IntArrayRef size, const TensorOptions & options) {
- auto result = TypeDefault::zeros(size, options);
- return result;
-}
-Tensor zeros_like(const Tensor & self, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) {
- auto result = TypeDefault::zeros_like(self, options, memory_format);
- return result;
-}
-Tensor & zeros_out_out(Tensor & out, IntArrayRef size) {
- TypeDefault::zeros_out_out(out, size);
- return out;
-}
// }
}
namespace {
TORCH_LIBRARY_IMPL(aten, Autograd, m) {
- m.impl("__and__.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__and___Scalar))
- );
- m.impl("__and__.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__and___Tensor))
- );
- m.impl_UNBOXED("__iand__.Scalar", &VariableType::__iand___Scalar);
- m.impl_UNBOXED("__iand__.Tensor", &VariableType::__iand___Tensor);
m.impl_UNBOXED("__ilshift__.Scalar", &VariableType::__ilshift___Scalar);
m.impl_UNBOXED("__ilshift__.Tensor", &VariableType::__ilshift___Tensor);
- m.impl_UNBOXED("__ior__.Scalar", &VariableType::__ior___Scalar);
- m.impl_UNBOXED("__ior__.Tensor", &VariableType::__ior___Tensor);
m.impl_UNBOXED("__irshift__.Scalar", &VariableType::__irshift___Scalar);
m.impl_UNBOXED("__irshift__.Tensor", &VariableType::__irshift___Tensor);
- m.impl_UNBOXED("__ixor__.Scalar", &VariableType::__ixor___Scalar);
- m.impl_UNBOXED("__ixor__.Tensor", &VariableType::__ixor___Tensor);
m.impl("__lshift__.Scalar",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__lshift___Scalar))
);
m.impl("__lshift__.Tensor",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__lshift___Tensor))
);
- m.impl("__or__.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__or___Scalar))
- );
- m.impl("__or__.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__or___Tensor))
- );
m.impl("__rshift__.Scalar",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__rshift___Scalar))
);
m.impl("__rshift__.Tensor",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__rshift___Tensor))
);
- m.impl("__xor__.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__xor___Scalar))
- );
- m.impl("__xor__.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::__xor___Tensor))
- );
m.impl("_adaptive_avg_pool2d",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_adaptive_avg_pool2d))
);
m.impl("_adaptive_avg_pool2d_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_adaptive_avg_pool2d_backward))
);
- m.impl("_add_batch_dim",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_add_batch_dim))
- );
m.impl_UNBOXED("_addmv_impl_", &VariableType::_addmv_impl_);
m.impl("_addr",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_addr))
@@ -40062,37 +38041,10 @@
m.impl_UNBOXED("_addr.out", &VariableType::_addr_out_out);
m.impl_UNBOXED("_amp_non_finite_check_and_unscale_", &VariableType::_amp_non_finite_check_and_unscale_);
m.impl_UNBOXED("_amp_update_scale", &VariableType::_amp_update_scale);
- m.impl_UNBOXED("_baddbmm_mkl_", &VariableType::_baddbmm_mkl_);
- m.impl_UNBOXED("_batch_norm_impl_index", &VariableType::_batch_norm_impl_index);
- m.impl_UNBOXED("_batch_norm_impl_index_backward", &VariableType::_batch_norm_impl_index_backward);
m.impl("_bmm",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_bmm))
);
m.impl_UNBOXED("_bmm.out", &VariableType::_bmm_out_out);
- m.impl("_cast_Byte",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cast_Byte))
- );
- m.impl("_cast_Char",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cast_Char))
- );
- m.impl("_cast_Double",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cast_Double))
- );
- m.impl("_cast_Float",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cast_Float))
- );
- m.impl("_cast_Half",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cast_Half))
- );
- m.impl("_cast_Int",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cast_Int))
- );
- m.impl("_cast_Long",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cast_Long))
- );
- m.impl("_cast_Short",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cast_Short))
- );
m.impl("_cat",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cat))
);
@@ -40109,13 +38061,7 @@
m.impl("_cholesky_solve_helper",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cholesky_solve_helper))
);
- m.impl("_choose_qparams_per_tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_choose_qparams_per_tensor))
- );
m.impl_UNBOXED("_coalesced_", &VariableType::_coalesced_);
- m.impl_UNBOXED("_convolution", &VariableType::_convolution);
- m.impl_UNBOXED("_convolution_double_backward", &VariableType::_convolution_double_backward);
- m.impl_UNBOXED("_convolution_nogroup", &VariableType::_convolution_nogroup);
m.impl("_copy_from",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_copy_from))
);
@@ -40136,18 +38082,6 @@
m.impl("_cudnn_rnn_flatten_weight",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cudnn_rnn_flatten_weight))
);
- m.impl("_cufft_clear_plan_cache",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cufft_clear_plan_cache))
- );
- m.impl("_cufft_get_plan_cache_max_size",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cufft_get_plan_cache_max_size))
- );
- m.impl("_cufft_get_plan_cache_size",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cufft_get_plan_cache_size))
- );
- m.impl("_cufft_set_plan_cache_max_size",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cufft_set_plan_cache_max_size))
- );
m.impl_UNBOXED("_cummax_helper", &VariableType::_cummax_helper);
m.impl_UNBOXED("_cummin_helper", &VariableType::_cummin_helper);
m.impl("_cumprod",
@@ -40158,29 +38092,21 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_cumsum))
);
m.impl_UNBOXED("_cumsum.out", &VariableType::_cumsum_out_out);
- m.impl("_debug_has_internal_overlap",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_debug_has_internal_overlap))
- );
m.impl("_dimI",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_dimI))
);
m.impl("_dimV",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_dimV))
);
- m.impl("_dim_arange",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_dim_arange))
- );
m.impl("_dirichlet_grad",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_dirichlet_grad))
);
m.impl_UNBOXED("_embedding_bag", &VariableType::_embedding_bag);
- m.impl_UNBOXED("_embedding_bag_backward", &VariableType::_embedding_bag_backward);
m.impl_UNBOXED("_embedding_bag_dense_backward", &VariableType::_embedding_bag_dense_backward);
m.impl_UNBOXED("_embedding_bag_forward_only", &VariableType::_embedding_bag_forward_only);
m.impl("_embedding_bag_per_sample_weights_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_embedding_bag_per_sample_weights_backward))
);
- m.impl_UNBOXED("_embedding_bag_sparse_backward", &VariableType::_embedding_bag_sparse_backward);
m.impl("_empty_affine_quantized",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_empty_affine_quantized))
);
@@ -40193,25 +38119,13 @@
m.impl("_fake_quantize_learnable_per_channel_affine",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_fake_quantize_learnable_per_channel_affine))
);
- m.impl("_fake_quantize_learnable_per_channel_affine_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_fake_quantize_learnable_per_channel_affine_backward))
- );
m.impl("_fake_quantize_learnable_per_tensor_affine",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_fake_quantize_learnable_per_tensor_affine))
);
- m.impl("_fake_quantize_learnable_per_tensor_affine_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_fake_quantize_learnable_per_tensor_affine_backward))
- );
m.impl("_fft_with_size",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_fft_with_size))
);
m.impl_UNBOXED("_fused_dropout", &VariableType::_fused_dropout);
- m.impl("_gather_sparse_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_gather_sparse_backward))
- );
- m.impl("_has_compatible_shallow_copy_type",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_has_compatible_shallow_copy_type))
- );
m.impl_UNBOXED("_index_copy_", &VariableType::_index_copy_);
m.impl_UNBOXED("_index_put_impl_", &VariableType::_index_put_impl_);
m.impl("_indices",
@@ -40266,31 +38180,13 @@
m.impl("_multinomial_alias_setup",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_multinomial_alias_setup))
);
- m.impl("_nnpack_available",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_nnpack_available))
- );
m.impl_UNBOXED("_nnpack_spatial_convolution", &VariableType::_nnpack_spatial_convolution);
- m.impl("_nnpack_spatial_convolution_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_nnpack_spatial_convolution_backward))
- );
- m.impl("_nnpack_spatial_convolution_backward_input",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_nnpack_spatial_convolution_backward_input))
- );
- m.impl("_nnpack_spatial_convolution_backward_weight",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_nnpack_spatial_convolution_backward_weight))
- );
m.impl("_nnz",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_nnz))
);
m.impl("_pack_padded_sequence",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_pack_padded_sequence))
);
- m.impl("_pack_padded_sequence_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_pack_padded_sequence_backward))
- );
- m.impl("_pad_packed_sequence",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_pad_packed_sequence))
- );
m.impl("_pdist_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_pdist_backward))
);
@@ -40300,25 +38196,10 @@
m.impl("_qr_helper",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_qr_helper))
);
- m.impl("_remove_batch_dim",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_remove_batch_dim))
- );
- m.impl("_reshape_from_tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_reshape_from_tensor))
- );
m.impl("_s_where",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_s_where))
);
m.impl_UNBOXED("_sample_dirichlet", &VariableType::_sample_dirichlet);
- m.impl("_shape_as_tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_shape_as_tensor))
- );
- m.impl("_sobol_engine_draw",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sobol_engine_draw))
- );
- m.impl_UNBOXED("_sobol_engine_ff_", &VariableType::_sobol_engine_ff_);
- m.impl_UNBOXED("_sobol_engine_initialize_state_", &VariableType::_sobol_engine_initialize_state_);
- m.impl_UNBOXED("_sobol_engine_scramble_", &VariableType::_sobol_engine_scramble_);
m.impl("_softmax",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_softmax))
);
@@ -40331,50 +38212,27 @@
m.impl("_sparse_addmm",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_addmm))
);
- m.impl("_sparse_coo_tensor_unsafe",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_coo_tensor_unsafe))
- );
m.impl("_sparse_coo_tensor_with_dims",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_coo_tensor_with_dims))
);
m.impl("_sparse_coo_tensor_with_dims_and_tensors",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_coo_tensor_with_dims_and_tensors))
);
- m.impl("_sparse_log_softmax.int",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_log_softmax_int))
- );
- m.impl_UNBOXED("_sparse_log_softmax.Dimname", &VariableType::_sparse_log_softmax_Dimname);
m.impl("_sparse_log_softmax",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_log_softmax))
);
m.impl("_sparse_log_softmax_backward_data",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_log_softmax_backward_data))
);
- m.impl("_sparse_mm",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_mm))
- );
- m.impl("_sparse_softmax.int",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_softmax_int))
- );
- m.impl_UNBOXED("_sparse_softmax.Dimname", &VariableType::_sparse_softmax_Dimname);
m.impl("_sparse_softmax",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_softmax))
);
m.impl("_sparse_softmax_backward_data",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_softmax_backward_data))
);
- m.impl("_sparse_sum",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_sum))
- );
- m.impl("_sparse_sum.dtype",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_sum_dtype))
- );
m.impl("_sparse_sum.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_sum_dim))
);
- m.impl("_sparse_sum.dim_dtype",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_sum_dim_dtype))
- );
m.impl("_sparse_sum_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_sparse_sum_backward))
);
@@ -40393,11 +38251,6 @@
);
m.impl_UNBOXED("_test_optional_floatlist", &VariableType::_test_optional_floatlist);
m.impl_UNBOXED("_test_optional_intlist", &VariableType::_test_optional_intlist);
- m.impl("_test_serialization_subcmul",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_test_serialization_subcmul))
- );
- m.impl_UNBOXED("_thnn_differentiable_gru_cell_backward", &VariableType::_thnn_differentiable_gru_cell_backward);
- m.impl_UNBOXED("_thnn_differentiable_lstm_cell_backward", &VariableType::_thnn_differentiable_lstm_cell_backward);
m.impl_UNBOXED("_thnn_fused_gru_cell", &VariableType::_thnn_fused_gru_cell);
m.impl("_thnn_fused_gru_cell_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_thnn_fused_gru_cell_backward))
@@ -40422,28 +38275,18 @@
m.impl("_use_cudnn_ctc_loss",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_use_cudnn_ctc_loss))
);
- m.impl("_use_cudnn_rnn_flatten_weight",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_use_cudnn_rnn_flatten_weight))
- );
- m.impl_UNBOXED("_validate_sparse_coo_tensor_args", &VariableType::_validate_sparse_coo_tensor_args);
m.impl("_values",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_values))
);
m.impl("_var",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_var))
);
- m.impl("_weight_norm",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_weight_norm))
- );
m.impl("_weight_norm_cuda_interface",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_weight_norm_cuda_interface))
);
m.impl("_weight_norm_cuda_interface_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_weight_norm_cuda_interface_backward))
);
- m.impl("_weight_norm_differentiable_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::_weight_norm_differentiable_backward))
- );
m.impl("abs",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::abs))
);
@@ -40464,12 +38307,6 @@
);
m.impl_UNBOXED("acosh_", &VariableType::acosh_);
m.impl_UNBOXED("acosh.out", &VariableType::acosh_out_out);
- m.impl("adaptive_avg_pool1d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::adaptive_avg_pool1d))
- );
- m.impl("adaptive_avg_pool2d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::adaptive_avg_pool2d))
- );
m.impl_UNBOXED("adaptive_avg_pool2d.out", &VariableType::adaptive_avg_pool2d_out_out);
m.impl("adaptive_avg_pool3d",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::adaptive_avg_pool3d))
@@ -40479,9 +38316,6 @@
);
m.impl_UNBOXED("adaptive_avg_pool3d_backward.grad_input", &VariableType::adaptive_avg_pool3d_backward_out_grad_input);
m.impl_UNBOXED("adaptive_avg_pool3d.out", &VariableType::adaptive_avg_pool3d_out_out);
- m.impl("adaptive_max_pool1d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::adaptive_max_pool1d))
- );
m.impl("adaptive_max_pool2d",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::adaptive_max_pool2d))
);
@@ -40545,36 +38379,16 @@
m.impl("affine_grid_generator",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::affine_grid_generator))
);
- m.impl("affine_grid_generator_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::affine_grid_generator_backward))
- );
m.impl("alias",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::alias))
);
- m.impl("align_as",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::align_as))
- );
- m.impl("align_tensors",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::align_tensors))
- );
- m.impl_UNBOXED("align_to", &VariableType::align_to);
- m.impl_UNBOXED("align_to.ellipsis_idx", &VariableType::align_to_ellipsis_idx);
m.impl("all.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::all_dim))
);
- m.impl_UNBOXED("all.dimname", &VariableType::all_dimname);
m.impl("all",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::all))
);
m.impl_UNBOXED("all.out", &VariableType::all_out_out);
- m.impl_UNBOXED("all.dimname_out", &VariableType::all_out_dimname_out);
- m.impl("allclose",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::allclose))
- );
- m.impl("alpha_dropout",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::alpha_dropout))
- );
- m.impl_UNBOXED("alpha_dropout_", &VariableType::alpha_dropout_);
m.impl("angle",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::angle))
);
@@ -40582,22 +38396,10 @@
m.impl("any.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::any_dim))
);
- m.impl_UNBOXED("any.dimname", &VariableType::any_dimname);
m.impl("any",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::any))
);
m.impl_UNBOXED("any.out", &VariableType::any_out_out);
- m.impl_UNBOXED("any.dimname_out", &VariableType::any_out_dimname_out);
- m.impl("arange",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::arange))
- );
- m.impl("arange.start",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::arange_start))
- );
- m.impl("arange.start_step",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::arange_start_step))
- );
- m.impl_UNBOXED("arange.out", &VariableType::arange_out_out);
m.impl_UNBOXED("arange.start_out", &VariableType::arange_out_start_out);
m.impl("argmax",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::argmax))
@@ -40605,10 +38407,6 @@
m.impl("argmin",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::argmin))
);
- m.impl("argsort",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::argsort))
- );
- m.impl_UNBOXED("argsort.dimname", &VariableType::argsort_dimname);
m.impl("as_strided",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::as_strided))
);
@@ -40638,27 +38436,6 @@
);
m.impl_UNBOXED("atanh_", &VariableType::atanh_);
m.impl_UNBOXED("atanh.out", &VariableType::atanh_out_out);
- m.impl("atleast_1d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::atleast_1d))
- );
- m.impl("atleast_1d.Sequence",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::atleast_1d_Sequence))
- );
- m.impl("atleast_2d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::atleast_2d))
- );
- m.impl("atleast_2d.Sequence",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::atleast_2d_Sequence))
- );
- m.impl("atleast_3d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::atleast_3d))
- );
- m.impl("atleast_3d.Sequence",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::atleast_3d_Sequence))
- );
- m.impl("avg_pool1d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::avg_pool1d))
- );
m.impl("avg_pool2d",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::avg_pool2d))
);
@@ -40680,13 +38457,6 @@
);
m.impl_UNBOXED("baddbmm_", &VariableType::baddbmm_);
m.impl_UNBOXED("baddbmm.out", &VariableType::baddbmm_out_out);
- m.impl("bartlett_window",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bartlett_window))
- );
- m.impl("bartlett_window.periodic",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bartlett_window_periodic))
- );
- m.impl_UNBOXED("batch_norm", &VariableType::batch_norm);
m.impl_UNBOXED("batch_norm_backward_elemt", &VariableType::batch_norm_backward_elemt);
m.impl_UNBOXED("batch_norm_backward_reduce", &VariableType::batch_norm_backward_reduce);
m.impl_UNBOXED("batch_norm_elemt", &VariableType::batch_norm_elemt);
@@ -40698,70 +38468,27 @@
);
m.impl_UNBOXED("batch_norm_update_stats", &VariableType::batch_norm_update_stats);
m.impl_UNBOXED("bernoulli", &VariableType::bernoulli);
- m.impl_UNBOXED("bernoulli.p", &VariableType::bernoulli_p);
m.impl_UNBOXED("bernoulli_.Tensor", &VariableType::bernoulli__Tensor);
m.impl_UNBOXED("bernoulli_.float", &VariableType::bernoulli__float);
m.impl_UNBOXED("bernoulli.out", &VariableType::bernoulli_out_out);
- m.impl_UNBOXED("bilinear", &VariableType::bilinear);
m.impl_UNBOXED("binary_cross_entropy", &VariableType::binary_cross_entropy);
m.impl_UNBOXED("binary_cross_entropy_backward", &VariableType::binary_cross_entropy_backward);
m.impl_UNBOXED("binary_cross_entropy_backward.grad_input", &VariableType::binary_cross_entropy_backward_out_grad_input);
m.impl_UNBOXED("binary_cross_entropy.out", &VariableType::binary_cross_entropy_out_out);
m.impl_UNBOXED("binary_cross_entropy_with_logits", &VariableType::binary_cross_entropy_with_logits);
- m.impl_UNBOXED("binary_cross_entropy_with_logits_backward", &VariableType::binary_cross_entropy_with_logits_backward);
m.impl_UNBOXED("bincount", &VariableType::bincount);
m.impl_UNBOXED("binomial", &VariableType::binomial);
- m.impl("bitwise_and.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bitwise_and_Scalar))
- );
- m.impl("bitwise_and.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bitwise_and_Tensor))
- );
- m.impl_UNBOXED("bitwise_and_.Scalar", &VariableType::bitwise_and__Scalar);
- m.impl_UNBOXED("bitwise_and_.Tensor", &VariableType::bitwise_and__Tensor);
m.impl_UNBOXED("bitwise_and.Tensor_out", &VariableType::bitwise_and_out_Tensor_out);
m.impl_UNBOXED("bitwise_and.Scalar_out", &VariableType::bitwise_and_out_Scalar_out);
- m.impl("bitwise_not",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bitwise_not))
- );
- m.impl_UNBOXED("bitwise_not_", &VariableType::bitwise_not_);
m.impl_UNBOXED("bitwise_not.out", &VariableType::bitwise_not_out_out);
- m.impl("bitwise_or.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bitwise_or_Scalar))
- );
- m.impl("bitwise_or.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bitwise_or_Tensor))
- );
- m.impl_UNBOXED("bitwise_or_.Scalar", &VariableType::bitwise_or__Scalar);
- m.impl_UNBOXED("bitwise_or_.Tensor", &VariableType::bitwise_or__Tensor);
m.impl_UNBOXED("bitwise_or.Tensor_out", &VariableType::bitwise_or_out_Tensor_out);
m.impl_UNBOXED("bitwise_or.Scalar_out", &VariableType::bitwise_or_out_Scalar_out);
- m.impl("bitwise_xor.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bitwise_xor_Scalar))
- );
- m.impl("bitwise_xor.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bitwise_xor_Tensor))
- );
- m.impl_UNBOXED("bitwise_xor_.Scalar", &VariableType::bitwise_xor__Scalar);
- m.impl_UNBOXED("bitwise_xor_.Tensor", &VariableType::bitwise_xor__Tensor);
m.impl_UNBOXED("bitwise_xor.Tensor_out", &VariableType::bitwise_xor_out_Tensor_out);
m.impl_UNBOXED("bitwise_xor.Scalar_out", &VariableType::bitwise_xor_out_Scalar_out);
- m.impl("blackman_window",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::blackman_window))
- );
- m.impl("blackman_window.periodic",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::blackman_window_periodic))
- );
- m.impl("block_diag",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::block_diag))
- );
m.impl("bmm",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bmm))
);
m.impl_UNBOXED("bmm.out", &VariableType::bmm_out_out);
- m.impl("broadcast_tensors",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::broadcast_tensors))
- );
m.impl("bucketize.Tensor",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bucketize_Tensor))
);
@@ -40769,22 +38496,11 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::bucketize_Scalar))
);
m.impl_UNBOXED("bucketize.Tensor_out", &VariableType::bucketize_out_Tensor_out);
- m.impl("can_cast",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::can_cast))
- );
- m.impl("cartesian_prod",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cartesian_prod))
- );
m.impl("cat",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cat))
);
- m.impl_UNBOXED("cat.names", &VariableType::cat_names);
m.impl_UNBOXED("cat.out", &VariableType::cat_out_out);
- m.impl_UNBOXED("cat.names_out", &VariableType::cat_out_names_out);
m.impl_UNBOXED("cauchy_", &VariableType::cauchy_);
- m.impl("cdist",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cdist))
- );
m.impl("ceil",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::ceil))
);
@@ -40794,9 +38510,6 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::celu))
);
m.impl_UNBOXED("celu_", &VariableType::celu_);
- m.impl("chain_matmul",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::chain_matmul))
- );
m.impl("channel_shuffle",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::channel_shuffle))
);
@@ -40812,9 +38525,6 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cholesky_solve))
);
m.impl_UNBOXED("cholesky_solve.out", &VariableType::cholesky_solve_out_out);
- m.impl("chunk",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::chunk))
- );
m.impl("clamp",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::clamp))
);
@@ -40844,9 +38554,6 @@
);
m.impl_UNBOXED("col2im_backward.grad_input", &VariableType::col2im_backward_out_grad_input);
m.impl_UNBOXED("col2im.out", &VariableType::col2im_out_out);
- m.impl("combinations",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::combinations))
- );
m.impl("conj",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::conj))
);
@@ -40854,22 +38561,9 @@
m.impl("constant_pad_nd",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::constant_pad_nd))
);
- m.impl("contiguous",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::contiguous))
- );
- m.impl_UNBOXED("conv1d", &VariableType::conv1d);
- m.impl_UNBOXED("conv2d", &VariableType::conv2d);
- m.impl_UNBOXED("conv3d", &VariableType::conv3d);
m.impl("conv_tbc",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::conv_tbc))
);
- m.impl("conv_tbc_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::conv_tbc_backward))
- );
- m.impl_UNBOXED("conv_transpose1d", &VariableType::conv_transpose1d);
- m.impl_UNBOXED("conv_transpose2d.input", &VariableType::conv_transpose2d_input);
- m.impl_UNBOXED("conv_transpose3d.input", &VariableType::conv_transpose3d_input);
- m.impl_UNBOXED("convolution", &VariableType::convolution);
m.impl("convolution_backward_overrideable",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::convolution_backward_overrideable))
);
@@ -40885,12 +38579,6 @@
);
m.impl_UNBOXED("cosh_", &VariableType::cosh_);
m.impl_UNBOXED("cosh.out", &VariableType::cosh_out_out);
- m.impl("cosine_embedding_loss",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cosine_embedding_loss))
- );
- m.impl("cosine_similarity",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cosine_similarity))
- );
m.impl("count_nonzero.dim_IntList",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::count_nonzero_dim_IntList))
);
@@ -40901,12 +38589,6 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cross))
);
m.impl_UNBOXED("cross.out", &VariableType::cross_out_out);
- m.impl("ctc_loss.IntList",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::ctc_loss_IntList))
- );
- m.impl("ctc_loss.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::ctc_loss_Tensor))
- );
m.impl("cudnn_affine_grid_generator",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cudnn_affine_grid_generator))
);
@@ -40947,33 +38629,22 @@
m.impl("cudnn_grid_sampler_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cudnn_grid_sampler_backward))
);
- m.impl("cudnn_is_acceptable",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cudnn_is_acceptable))
- );
m.impl("cummax",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cummax))
);
- m.impl_UNBOXED("cummax.dimname", &VariableType::cummax_dimname);
m.impl_UNBOXED("cummax.out", &VariableType::cummax_out_out);
- m.impl_UNBOXED("cummax.dimname_out", &VariableType::cummax_out_dimname_out);
m.impl("cummin",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cummin))
);
- m.impl_UNBOXED("cummin.dimname", &VariableType::cummin_dimname);
m.impl_UNBOXED("cummin.out", &VariableType::cummin_out_out);
- m.impl_UNBOXED("cummin.dimname_out", &VariableType::cummin_out_dimname_out);
m.impl("cumprod",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cumprod))
);
- m.impl_UNBOXED("cumprod.dimname", &VariableType::cumprod_dimname);
m.impl_UNBOXED("cumprod.out", &VariableType::cumprod_out_out);
- m.impl_UNBOXED("cumprod.dimname_out", &VariableType::cumprod_out_dimname_out);
m.impl("cumsum",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::cumsum))
);
- m.impl_UNBOXED("cumsum.dimname", &VariableType::cumsum_dimname);
m.impl_UNBOXED("cumsum.out", &VariableType::cumsum_out_out);
- m.impl_UNBOXED("cumsum.dimname_out", &VariableType::cumsum_out_dimname_out);
m.impl("deg2rad",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::deg2rad))
);
@@ -40994,17 +38665,10 @@
m.impl("diag",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::diag))
);
- m.impl("diag_embed",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::diag_embed))
- );
m.impl_UNBOXED("diag.out", &VariableType::diag_out_out);
- m.impl("diagflat",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::diagflat))
- );
m.impl("diagonal",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::diagonal))
);
- m.impl_UNBOXED("diagonal.Dimname", &VariableType::diagonal_Dimname);
m.impl("digamma",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::digamma))
);
@@ -41026,17 +38690,10 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::dot))
);
m.impl_UNBOXED("dot.out", &VariableType::dot_out_out);
- m.impl("dropout",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::dropout))
- );
- m.impl_UNBOXED("dropout_", &VariableType::dropout_);
m.impl("eig",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::eig))
);
m.impl_UNBOXED("eig.e", &VariableType::eig_out_e);
- m.impl("einsum",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::einsum))
- );
m.impl("elu",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::elu))
);
@@ -41049,28 +38706,13 @@
m.impl("embedding",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::embedding))
);
- m.impl("embedding_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::embedding_backward))
- );
- m.impl_UNBOXED("embedding_bag", &VariableType::embedding_bag);
m.impl("embedding_dense_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::embedding_dense_backward))
);
m.impl_UNBOXED("embedding_renorm_", &VariableType::embedding_renorm_);
- m.impl("embedding_sparse_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::embedding_sparse_backward))
- );
- m.impl_UNBOXED("empty.names", &VariableType::empty_names);
m.impl("empty.memory_format",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::empty_memory_format))
);
- m.impl("empty_like",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::empty_like))
- );
- m.impl("empty_meta",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::empty_meta))
- );
- m.impl_UNBOXED("empty.out", &VariableType::empty_out_out);
m.impl_UNBOXED("empty_quantized", &VariableType::empty_quantized);
m.impl("empty_strided",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::empty_strided))
@@ -41111,88 +38753,25 @@
m.impl("expand",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::expand))
);
- m.impl("expand_as",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::expand_as))
- );
m.impl("expm1",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::expm1))
);
m.impl_UNBOXED("expm1_", &VariableType::expm1_);
m.impl_UNBOXED("expm1.out", &VariableType::expm1_out_out);
m.impl_UNBOXED("exponential_", &VariableType::exponential_);
- m.impl("eye",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::eye))
- );
- m.impl("eye.m",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::eye_m))
- );
m.impl_UNBOXED("eye.out", &VariableType::eye_out_out);
m.impl_UNBOXED("eye.m_out", &VariableType::eye_out_m_out);
m.impl("fake_quantize_per_channel_affine",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fake_quantize_per_channel_affine))
);
- m.impl("fake_quantize_per_channel_affine_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fake_quantize_per_channel_affine_backward))
- );
m.impl("fake_quantize_per_tensor_affine",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fake_quantize_per_tensor_affine))
);
- m.impl("fake_quantize_per_tensor_affine_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fake_quantize_per_tensor_affine_backward))
- );
- m.impl("fbgemm_linear_fp16_weight",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fbgemm_linear_fp16_weight))
- );
- m.impl("fbgemm_linear_fp16_weight_fp32_activation",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fbgemm_linear_fp16_weight_fp32_activation))
- );
- m.impl("fbgemm_linear_int8_weight",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fbgemm_linear_int8_weight))
- );
- m.impl("fbgemm_linear_int8_weight_fp32_activation",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fbgemm_linear_int8_weight_fp32_activation))
- );
- m.impl("fbgemm_linear_quantize_weight",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fbgemm_linear_quantize_weight))
- );
- m.impl("fbgemm_pack_gemm_matrix_fp16",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fbgemm_pack_gemm_matrix_fp16))
- );
- m.impl("fbgemm_pack_quantized_matrix",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fbgemm_pack_quantized_matrix))
- );
- m.impl("fbgemm_pack_quantized_matrix.KN",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fbgemm_pack_quantized_matrix_KN))
- );
- m.impl("feature_alpha_dropout",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::feature_alpha_dropout))
- );
- m.impl_UNBOXED("feature_alpha_dropout_", &VariableType::feature_alpha_dropout_);
- m.impl("feature_dropout",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::feature_dropout))
- );
- m.impl_UNBOXED("feature_dropout_", &VariableType::feature_dropout_);
- m.impl("fft",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fft))
- );
m.impl_UNBOXED("fill_.Scalar", &VariableType::fill__Scalar);
m.impl_UNBOXED("fill_.Tensor", &VariableType::fill__Tensor);
- m.impl_UNBOXED("fill_diagonal_", &VariableType::fill_diagonal_);
- m.impl("flatten.using_ints",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::flatten_using_ints))
- );
- m.impl_UNBOXED("flatten.named_out_dim", &VariableType::flatten_named_out_dim);
- m.impl_UNBOXED("flatten.using_names", &VariableType::flatten_using_names);
- m.impl_UNBOXED("flatten.DimnameList", &VariableType::flatten_DimnameList);
m.impl("flip",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::flip))
);
- m.impl("fliplr",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::fliplr))
- );
- m.impl("flipud",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::flipud))
- );
m.impl("floor",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::floor))
);
@@ -41200,11 +38779,7 @@
m.impl("floor_divide",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::floor_divide))
);
- m.impl("floor_divide.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::floor_divide_Scalar))
- );
m.impl_UNBOXED("floor_divide_.Tensor", &VariableType::floor_divide__Tensor);
- m.impl_UNBOXED("floor_divide_.Scalar", &VariableType::floor_divide__Scalar);
m.impl_UNBOXED("floor_divide.out", &VariableType::floor_divide_out_out);
m.impl_UNBOXED("floor.out", &VariableType::floor_out_out);
m.impl("fmod.Scalar",
@@ -41238,35 +38813,13 @@
);
m.impl_UNBOXED("fractional_max_pool3d_backward.grad_input", &VariableType::fractional_max_pool3d_backward_out_grad_input);
m.impl_UNBOXED("fractional_max_pool3d.output", &VariableType::fractional_max_pool3d_out_output);
- m.impl("frobenius_norm",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::frobenius_norm))
- );
- m.impl("frobenius_norm.dim",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::frobenius_norm_dim))
- );
- m.impl_UNBOXED("frobenius_norm.out", &VariableType::frobenius_norm_out_out);
m.impl("from_file",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::from_file))
);
- m.impl_UNBOXED("full.names", &VariableType::full_names);
- m.impl("full",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::full))
- );
- m.impl("full_like",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::full_like))
- );
- m.impl_UNBOXED("full.out", &VariableType::full_out_out);
m.impl("gather",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::gather))
);
- m.impl_UNBOXED("gather.dimname", &VariableType::gather_dimname);
m.impl_UNBOXED("gather.out", &VariableType::gather_out_out);
- m.impl_UNBOXED("gather.dimname_out", &VariableType::gather_out_dimname_out);
- m.impl("gcd",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::gcd))
- );
- m.impl_UNBOXED("gcd_", &VariableType::gcd_);
- m.impl_UNBOXED("gcd.out", &VariableType::gcd_out_out);
m.impl("ge.Scalar",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::ge_Scalar))
);
@@ -41300,9 +38853,6 @@
);
m.impl_UNBOXED("glu_backward.grad_input", &VariableType::glu_backward_out_grad_input);
m.impl_UNBOXED("glu.out", &VariableType::glu_out_out);
- m.impl("grid_sampler",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::grid_sampler))
- );
m.impl("grid_sampler_2d",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::grid_sampler_2d))
);
@@ -41315,14 +38865,6 @@
m.impl("grid_sampler_3d_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::grid_sampler_3d_backward))
);
- m.impl_UNBOXED("group_norm", &VariableType::group_norm);
- m.impl("gru.input",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::gru_input))
- );
- m.impl("gru.data",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::gru_data))
- );
- m.impl_UNBOXED("gru_cell", &VariableType::gru_cell);
m.impl("gt.Scalar",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::gt_Scalar))
);
@@ -41333,24 +38875,6 @@
m.impl_UNBOXED("gt_.Tensor", &VariableType::gt__Tensor);
m.impl_UNBOXED("gt.Scalar_out", &VariableType::gt_out_Scalar_out);
m.impl_UNBOXED("gt.Tensor_out", &VariableType::gt_out_Tensor_out);
- m.impl("hamming_window",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::hamming_window))
- );
- m.impl("hamming_window.periodic",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::hamming_window_periodic))
- );
- m.impl("hamming_window.periodic_alpha",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::hamming_window_periodic_alpha))
- );
- m.impl("hamming_window.periodic_alpha_beta",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::hamming_window_periodic_alpha_beta))
- );
- m.impl("hann_window",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::hann_window))
- );
- m.impl("hann_window.periodic",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::hann_window_periodic))
- );
m.impl("hardshrink",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::hardshrink))
);
@@ -41382,9 +38906,6 @@
);
m.impl_UNBOXED("hardtanh_backward.grad_input", &VariableType::hardtanh_backward_out_grad_input);
m.impl_UNBOXED("hardtanh.out", &VariableType::hardtanh_out_out);
- m.impl("hinge_embedding_loss",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::hinge_embedding_loss))
- );
m.impl("histc",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::histc))
);
@@ -41393,9 +38914,6 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::hspmm))
);
m.impl_UNBOXED("hspmm.out", &VariableType::hspmm_out_out);
- m.impl("ifft",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::ifft))
- );
m.impl("im2col",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::im2col))
);
@@ -41404,45 +38922,19 @@
);
m.impl_UNBOXED("im2col_backward.grad_input", &VariableType::im2col_backward_out_grad_input);
m.impl_UNBOXED("im2col.out", &VariableType::im2col_out_out);
- m.impl("imag",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::imag))
- );
m.impl_UNBOXED("index.Tensor", &VariableType::index_Tensor);
- m.impl("index_add",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::index_add))
- );
- m.impl_UNBOXED("index_add.dimname", &VariableType::index_add_dimname);
m.impl_UNBOXED("index_add_", &VariableType::index_add_);
- m.impl("index_copy",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::index_copy))
- );
- m.impl_UNBOXED("index_copy.dimname", &VariableType::index_copy_dimname);
m.impl_UNBOXED("index_copy_", &VariableType::index_copy_);
- m.impl_UNBOXED("index_copy_.dimname", &VariableType::index_copy__dimname);
- m.impl("index_fill.int_Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::index_fill_int_Scalar))
- );
- m.impl("index_fill.int_Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::index_fill_int_Tensor))
- );
- m.impl_UNBOXED("index_fill.Dimname_Scalar", &VariableType::index_fill_Dimname_Scalar);
- m.impl_UNBOXED("index_fill.Dimname_Tensor", &VariableType::index_fill_Dimname_Tensor);
m.impl_UNBOXED("index_fill_.int_Scalar", &VariableType::index_fill__int_Scalar);
m.impl_UNBOXED("index_fill_.int_Tensor", &VariableType::index_fill__int_Tensor);
- m.impl_UNBOXED("index_fill_.Dimname_Scalar", &VariableType::index_fill__Dimname_Scalar);
- m.impl_UNBOXED("index_fill_.Dimname_Tensor", &VariableType::index_fill__Dimname_Tensor);
- m.impl_UNBOXED("index_put", &VariableType::index_put);
m.impl_UNBOXED("index_put_", &VariableType::index_put_);
m.impl("index_select",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::index_select))
);
- m.impl_UNBOXED("index_select.dimname", &VariableType::index_select_dimname);
m.impl_UNBOXED("index_select.out", &VariableType::index_select_out_out);
- m.impl_UNBOXED("index_select.dimname_out", &VariableType::index_select_out_dimname_out);
m.impl("indices",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::indices))
);
- m.impl_UNBOXED("instance_norm", &VariableType::instance_norm);
m.impl("int_repr",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::int_repr))
);
@@ -41450,66 +38942,17 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::inverse))
);
m.impl_UNBOXED("inverse.out", &VariableType::inverse_out_out);
- m.impl("irfft",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::irfft))
- );
m.impl("is_coalesced",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_coalesced))
);
- m.impl("is_complex",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_complex))
- );
- m.impl("is_distributed",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_distributed))
- );
- m.impl("is_floating_point",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_floating_point))
- );
- m.impl("is_nonzero",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_nonzero))
- );
- m.impl("is_pinned",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_pinned))
- );
- m.impl("is_same_size",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_same_size))
- );
m.impl("is_set_to",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_set_to))
);
- m.impl("is_signed",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_signed))
- );
- m.impl("is_vulkan_available",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::is_vulkan_available))
- );
- m.impl("isclose",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::isclose))
- );
- m.impl("isfinite",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::isfinite))
- );
- m.impl("isinf",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::isinf))
- );
m.impl("isnan",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::isnan))
);
- m.impl("isneginf",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::isneginf))
- );
m.impl_UNBOXED("isneginf.out", &VariableType::isneginf_out_out);
- m.impl("isposinf",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::isposinf))
- );
m.impl_UNBOXED("isposinf.out", &VariableType::isposinf_out_out);
- m.impl("isreal",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::isreal))
- );
- m.impl_UNBOXED("istft", &VariableType::istft);
- m.impl("item",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::item))
- );
m.impl("kl_div",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::kl_div))
);
@@ -41519,9 +38962,7 @@
m.impl("kthvalue",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::kthvalue))
);
- m.impl_UNBOXED("kthvalue.dimname", &VariableType::kthvalue_dimname);
m.impl_UNBOXED("kthvalue.values", &VariableType::kthvalue_out_values);
- m.impl_UNBOXED("kthvalue.dimname_out", &VariableType::kthvalue_out_dimname_out);
m.impl("l1_loss",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::l1_loss))
);
@@ -41530,12 +38971,6 @@
);
m.impl_UNBOXED("l1_loss_backward.grad_input", &VariableType::l1_loss_backward_out_grad_input);
m.impl_UNBOXED("l1_loss.out", &VariableType::l1_loss_out_out);
- m.impl_UNBOXED("layer_norm", &VariableType::layer_norm);
- m.impl("lcm",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::lcm))
- );
- m.impl_UNBOXED("lcm_", &VariableType::lcm_);
- m.impl_UNBOXED("lcm.out", &VariableType::lcm_out_out);
m.impl("le.Scalar",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::le_Scalar))
);
@@ -41569,10 +39004,6 @@
);
m.impl_UNBOXED("lgamma_", &VariableType::lgamma_);
m.impl_UNBOXED("lgamma.out", &VariableType::lgamma_out_out);
- m.impl_UNBOXED("linear", &VariableType::linear);
- m.impl("linspace",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::linspace))
- );
m.impl_UNBOXED("linspace.out", &VariableType::linspace_out_out);
m.impl("log",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::log))
@@ -41595,9 +39026,6 @@
m.impl_UNBOXED("log_", &VariableType::log_);
m.impl_UNBOXED("log_normal_", &VariableType::log_normal_);
m.impl_UNBOXED("log.out", &VariableType::log_out_out);
- m.impl("log_sigmoid",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::log_sigmoid))
- );
m.impl("log_sigmoid_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::log_sigmoid_backward))
);
@@ -41606,11 +39034,6 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::log_sigmoid_forward))
);
m.impl_UNBOXED("log_sigmoid_forward.output", &VariableType::log_sigmoid_forward_out_output);
- m.impl_UNBOXED("log_sigmoid.out", &VariableType::log_sigmoid_out_out);
- m.impl("log_softmax.int",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::log_softmax_int))
- );
- m.impl_UNBOXED("log_softmax.Dimname", &VariableType::log_softmax_Dimname);
m.impl("logaddexp",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logaddexp))
);
@@ -41622,58 +39045,25 @@
m.impl("logcumsumexp",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logcumsumexp))
);
- m.impl_UNBOXED("logcumsumexp.dimname", &VariableType::logcumsumexp_dimname);
m.impl_UNBOXED("logcumsumexp.out", &VariableType::logcumsumexp_out_out);
- m.impl_UNBOXED("logcumsumexp.dimname_out", &VariableType::logcumsumexp_out_dimname_out);
m.impl("logdet",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logdet))
);
- m.impl("logical_and",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logical_and))
- );
- m.impl_UNBOXED("logical_and_", &VariableType::logical_and_);
m.impl_UNBOXED("logical_and.out", &VariableType::logical_and_out_out);
- m.impl("logical_not",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logical_not))
- );
- m.impl_UNBOXED("logical_not_", &VariableType::logical_not_);
m.impl_UNBOXED("logical_not.out", &VariableType::logical_not_out_out);
- m.impl("logical_or",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logical_or))
- );
- m.impl_UNBOXED("logical_or_", &VariableType::logical_or_);
m.impl_UNBOXED("logical_or.out", &VariableType::logical_or_out_out);
- m.impl("logical_xor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logical_xor))
- );
- m.impl_UNBOXED("logical_xor_", &VariableType::logical_xor_);
m.impl_UNBOXED("logical_xor.out", &VariableType::logical_xor_out_out);
m.impl("logit",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logit))
);
m.impl_UNBOXED("logit_", &VariableType::logit_);
- m.impl("logit_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logit_backward))
- );
m.impl_UNBOXED("logit_backward.grad_input", &VariableType::logit_backward_out_grad_input);
m.impl_UNBOXED("logit.out", &VariableType::logit_out_out);
- m.impl("logspace",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logspace))
- );
m.impl_UNBOXED("logspace.out", &VariableType::logspace_out_out);
m.impl("logsumexp",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::logsumexp))
);
- m.impl_UNBOXED("logsumexp.names", &VariableType::logsumexp_names);
m.impl_UNBOXED("logsumexp.out", &VariableType::logsumexp_out_out);
- m.impl_UNBOXED("logsumexp.names_out", &VariableType::logsumexp_out_names_out);
- m.impl("lstm.input",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::lstm_input))
- );
- m.impl("lstm.data",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::lstm_data))
- );
- m.impl_UNBOXED("lstm_cell", &VariableType::lstm_cell);
m.impl("lstsq",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::lstsq))
);
@@ -41692,42 +39082,16 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::lu_solve))
);
m.impl_UNBOXED("lu_solve.out", &VariableType::lu_solve_out_out);
- m.impl("margin_ranking_loss",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::margin_ranking_loss))
- );
- m.impl("masked_fill.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::masked_fill_Scalar))
- );
- m.impl("masked_fill.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::masked_fill_Tensor))
- );
m.impl_UNBOXED("masked_fill_.Scalar", &VariableType::masked_fill__Scalar);
m.impl_UNBOXED("masked_fill_.Tensor", &VariableType::masked_fill__Tensor);
- m.impl("masked_scatter",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::masked_scatter))
- );
m.impl_UNBOXED("masked_scatter_", &VariableType::masked_scatter_);
m.impl("masked_select",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::masked_select))
);
m.impl_UNBOXED("masked_select.out", &VariableType::masked_select_out_out);
- m.impl("matmul",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::matmul))
- );
- m.impl_UNBOXED("matmul.out", &VariableType::matmul_out_out);
- m.impl("matrix_power",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::matrix_power))
- );
- m.impl("matrix_rank.tol",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::matrix_rank_tol))
- );
- m.impl("matrix_rank",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::matrix_rank))
- );
m.impl("max.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max_dim))
);
- m.impl_UNBOXED("max.names_dim", &VariableType::max_names_dim);
m.impl("max.other",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max_other))
);
@@ -41735,17 +39099,7 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max))
);
m.impl_UNBOXED("max.dim_max", &VariableType::max_out_dim_max);
- m.impl_UNBOXED("max.names_dim_max", &VariableType::max_out_names_dim_max);
m.impl_UNBOXED("max.out", &VariableType::max_out_out);
- m.impl("max_pool1d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max_pool1d))
- );
- m.impl("max_pool1d_with_indices",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max_pool1d_with_indices))
- );
- m.impl("max_pool2d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max_pool2d))
- );
m.impl("max_pool2d_with_indices",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max_pool2d_with_indices))
);
@@ -41754,9 +39108,6 @@
);
m.impl_UNBOXED("max_pool2d_with_indices_backward.grad_input", &VariableType::max_pool2d_with_indices_backward_out_grad_input);
m.impl_UNBOXED("max_pool2d_with_indices.out", &VariableType::max_pool2d_with_indices_out_out);
- m.impl("max_pool3d",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max_pool3d))
- );
m.impl("max_pool3d_with_indices",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max_pool3d_with_indices))
);
@@ -41781,35 +39132,23 @@
);
m.impl_UNBOXED("max_unpool3d_backward.grad_input", &VariableType::max_unpool3d_backward_out_grad_input);
m.impl_UNBOXED("max_unpool3d.out", &VariableType::max_unpool3d_out_out);
- m.impl("max_values",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::max_values))
- );
- m.impl_UNBOXED("max_values.names", &VariableType::max_values_names);
m.impl("mean",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::mean))
);
m.impl("mean.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::mean_dim))
);
- m.impl_UNBOXED("mean.names_dim", &VariableType::mean_names_dim);
m.impl_UNBOXED("mean.out", &VariableType::mean_out_out);
- m.impl_UNBOXED("mean.names_out", &VariableType::mean_out_names_out);
m.impl("median.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::median_dim))
);
- m.impl_UNBOXED("median.names_dim", &VariableType::median_names_dim);
m.impl("median",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::median))
);
m.impl_UNBOXED("median.dim_values", &VariableType::median_out_dim_values);
- m.impl_UNBOXED("median.names_dim_values", &VariableType::median_out_names_dim_values);
- m.impl("meshgrid",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::meshgrid))
- );
m.impl("min.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::min_dim))
);
- m.impl_UNBOXED("min.names_dim", &VariableType::min_names_dim);
m.impl("min.other",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::min_other))
);
@@ -41817,12 +39156,7 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::min))
);
m.impl_UNBOXED("min.dim_min", &VariableType::min_out_dim_min);
- m.impl_UNBOXED("min.names_dim_min", &VariableType::min_out_names_dim_min);
m.impl_UNBOXED("min.out", &VariableType::min_out_out);
- m.impl("min_values",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::min_values))
- );
- m.impl_UNBOXED("min_values.names", &VariableType::min_values_names);
m.impl_UNBOXED("miopen_batch_norm", &VariableType::miopen_batch_norm);
m.impl_UNBOXED("miopen_batch_norm_backward", &VariableType::miopen_batch_norm_backward);
m.impl_UNBOXED("miopen_convolution", &VariableType::miopen_convolution);
@@ -41867,12 +39201,6 @@
m.impl("mkldnn_convolution_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::mkldnn_convolution_backward))
);
- m.impl("mkldnn_convolution_backward_input",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::mkldnn_convolution_backward_input))
- );
- m.impl("mkldnn_convolution_backward_weights",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::mkldnn_convolution_backward_weights))
- );
m.impl_UNBOXED("mkldnn_linear", &VariableType::mkldnn_linear);
m.impl("mkldnn_max_pool2d",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::mkldnn_max_pool2d))
@@ -41893,15 +39221,7 @@
m.impl("mode",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::mode))
);
- m.impl_UNBOXED("mode.dimname", &VariableType::mode_dimname);
m.impl_UNBOXED("mode.values", &VariableType::mode_out_values);
- m.impl_UNBOXED("mode.dimname_out", &VariableType::mode_out_dimname_out);
- m.impl("movedim.intlist",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::movedim_intlist))
- );
- m.impl("movedim.int",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::movedim_int))
- );
m.impl("mse_loss",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::mse_loss))
);
@@ -41923,9 +39243,6 @@
m.impl_UNBOXED("multi_margin_loss_backward", &VariableType::multi_margin_loss_backward);
m.impl_UNBOXED("multi_margin_loss_backward.grad_input", &VariableType::multi_margin_loss_backward_out_grad_input);
m.impl_UNBOXED("multi_margin_loss.out", &VariableType::multi_margin_loss_out_out);
- m.impl("multilabel_margin_loss",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::multilabel_margin_loss))
- );
m.impl("multilabel_margin_loss_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::multilabel_margin_loss_backward))
);
@@ -41934,7 +39251,6 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::multilabel_margin_loss_forward))
);
m.impl_UNBOXED("multilabel_margin_loss_forward.output", &VariableType::multilabel_margin_loss_forward_out_output);
- m.impl_UNBOXED("multilabel_margin_loss.out", &VariableType::multilabel_margin_loss_out_out);
m.impl_UNBOXED("multinomial", &VariableType::multinomial);
m.impl_UNBOXED("multinomial.out", &VariableType::multinomial_out_out);
m.impl("mv",
@@ -41945,12 +39261,6 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::mvlgamma))
);
m.impl_UNBOXED("mvlgamma_", &VariableType::mvlgamma_);
- m.impl("narrow",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::narrow))
- );
- m.impl("narrow.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::narrow_Tensor))
- );
m.impl("narrow_copy",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::narrow_copy))
);
@@ -41979,33 +39289,17 @@
);
m.impl_UNBOXED("neg_", &VariableType::neg_);
m.impl_UNBOXED("neg.out", &VariableType::neg_out_out);
- m.impl("new_empty",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::new_empty))
- );
- m.impl("new_full",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::new_full))
- );
- m.impl("new_zeros",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::new_zeros))
- );
- m.impl_UNBOXED("nll_loss", &VariableType::nll_loss);
- m.impl_UNBOXED("nll_loss2d", &VariableType::nll_loss2d);
m.impl_UNBOXED("nll_loss2d_backward", &VariableType::nll_loss2d_backward);
m.impl_UNBOXED("nll_loss2d_backward.grad_input", &VariableType::nll_loss2d_backward_out_grad_input);
m.impl_UNBOXED("nll_loss2d_forward", &VariableType::nll_loss2d_forward);
m.impl_UNBOXED("nll_loss2d_forward.output", &VariableType::nll_loss2d_forward_out_output);
- m.impl_UNBOXED("nll_loss2d.out", &VariableType::nll_loss2d_out_out);
m.impl_UNBOXED("nll_loss_backward", &VariableType::nll_loss_backward);
m.impl_UNBOXED("nll_loss_backward.grad_input", &VariableType::nll_loss_backward_out_grad_input);
m.impl_UNBOXED("nll_loss_forward", &VariableType::nll_loss_forward);
m.impl_UNBOXED("nll_loss_forward.output", &VariableType::nll_loss_forward_out_output);
- m.impl_UNBOXED("nll_loss.out", &VariableType::nll_loss_out_out);
m.impl("nonzero",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::nonzero))
);
- m.impl("nonzero_numpy",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::nonzero_numpy))
- );
m.impl_UNBOXED("nonzero.out", &VariableType::nonzero_out_out);
m.impl("norm.ScalarOpt_dtype",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::norm_ScalarOpt_dtype))
@@ -42019,46 +39313,15 @@
m.impl("norm.ScalarOpt_dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::norm_ScalarOpt_dim))
);
- m.impl_UNBOXED("norm.names_ScalarOpt_dim_dtype", &VariableType::norm_names_ScalarOpt_dim_dtype);
- m.impl_UNBOXED("norm.names_ScalarOpt_dim", &VariableType::norm_names_ScalarOpt_dim);
- m.impl("norm_except_dim",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::norm_except_dim))
- );
m.impl_UNBOXED("norm.dtype_out", &VariableType::norm_out_dtype_out);
m.impl_UNBOXED("norm.out", &VariableType::norm_out_out);
- m.impl_UNBOXED("norm.names_dtype_out", &VariableType::norm_out_names_dtype_out);
- m.impl_UNBOXED("norm.names_out", &VariableType::norm_out_names_out);
m.impl_UNBOXED("normal.Tensor_float", &VariableType::normal_Tensor_float);
m.impl_UNBOXED("normal.float_Tensor", &VariableType::normal_float_Tensor);
m.impl_UNBOXED("normal.Tensor_Tensor", &VariableType::normal_Tensor_Tensor);
- m.impl_UNBOXED("normal.float_float", &VariableType::normal_float_float);
m.impl_UNBOXED("normal_", &VariableType::normal_);
m.impl_UNBOXED("normal.Tensor_float_out", &VariableType::normal_out_Tensor_float_out);
m.impl_UNBOXED("normal.float_Tensor_out", &VariableType::normal_out_float_Tensor_out);
m.impl_UNBOXED("normal.Tensor_Tensor_out", &VariableType::normal_out_Tensor_Tensor_out);
- m.impl_UNBOXED("normal.float_float_out", &VariableType::normal_out_float_float_out);
- m.impl("nuclear_norm",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::nuclear_norm))
- );
- m.impl("nuclear_norm.dim",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::nuclear_norm_dim))
- );
- m.impl_UNBOXED("nuclear_norm.out", &VariableType::nuclear_norm_out_out);
- m.impl_UNBOXED("nuclear_norm.dim_out", &VariableType::nuclear_norm_out_dim_out);
- m.impl("numpy_T",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::numpy_T))
- );
- m.impl("one_hot",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::one_hot))
- );
- m.impl_UNBOXED("ones.names", &VariableType::ones_names);
- m.impl("ones",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::ones))
- );
- m.impl("ones_like",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::ones_like))
- );
- m.impl_UNBOXED("ones.out", &VariableType::ones_out_out);
m.impl("orgqr",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::orgqr))
);
@@ -42067,32 +39330,13 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::ormqr))
);
m.impl_UNBOXED("ormqr.out", &VariableType::ormqr_out_out);
- m.impl("pairwise_distance",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::pairwise_distance))
- );
- m.impl("pdist",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::pdist))
- );
m.impl("permute",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::permute))
);
- m.impl("pin_memory",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::pin_memory))
- );
- m.impl("pinverse",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::pinverse))
- );
- m.impl("pixel_shuffle",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::pixel_shuffle))
- );
m.impl_UNBOXED("poisson", &VariableType::poisson);
- m.impl("poisson_nll_loss",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::poisson_nll_loss))
- );
m.impl("polygamma",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::polygamma))
);
- m.impl_UNBOXED("polygamma_", &VariableType::polygamma_);
m.impl_UNBOXED("polygamma.out", &VariableType::polygamma_out_out);
m.impl("pow.Tensor_Scalar",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::pow_Tensor_Scalar))
@@ -42120,12 +39364,7 @@
m.impl("prod.dim_int",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::prod_dim_int))
);
- m.impl_UNBOXED("prod.dim_Dimname", &VariableType::prod_dim_Dimname);
m.impl_UNBOXED("prod.int_out", &VariableType::prod_out_int_out);
- m.impl_UNBOXED("prod.Dimname_out", &VariableType::prod_out_Dimname_out);
- m.impl("promote_types",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::promote_types))
- );
m.impl_UNBOXED("put_", &VariableType::put_);
m.impl("q_per_channel_axis",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::q_per_channel_axis))
@@ -42159,91 +39398,24 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::quantize_per_tensor_tensors))
);
m.impl_UNBOXED("quantized_batch_norm", &VariableType::quantized_batch_norm);
- m.impl("quantized_gru_cell",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::quantized_gru_cell))
- );
- m.impl("quantized_lstm_cell",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::quantized_lstm_cell))
- );
m.impl("quantized_max_pool2d",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::quantized_max_pool2d))
);
- m.impl("quantized_rnn_relu_cell",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::quantized_rnn_relu_cell))
- );
- m.impl("quantized_rnn_tanh_cell",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::quantized_rnn_tanh_cell))
- );
m.impl("rad2deg",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::rad2deg))
);
m.impl_UNBOXED("rad2deg_", &VariableType::rad2deg_);
m.impl_UNBOXED("rad2deg.out", &VariableType::rad2deg_out_out);
- m.impl_UNBOXED("rand.names", &VariableType::rand_names);
- m.impl_UNBOXED("rand.generator_with_names", &VariableType::rand_generator_with_names);
- m.impl("rand",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::rand))
- );
- m.impl_UNBOXED("rand.generator", &VariableType::rand_generator);
- m.impl("rand_like",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::rand_like))
- );
- m.impl_UNBOXED("rand.out", &VariableType::rand_out_out);
- m.impl_UNBOXED("rand.generator_out", &VariableType::rand_out_generator_out);
- m.impl("randint",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::randint))
- );
- m.impl_UNBOXED("randint.generator", &VariableType::randint_generator);
- m.impl("randint.low",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::randint_low))
- );
- m.impl_UNBOXED("randint.low_generator", &VariableType::randint_low_generator);
- m.impl("randint_like",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::randint_like))
- );
- m.impl("randint_like.low_dtype",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::randint_like_low_dtype))
- );
- m.impl_UNBOXED("randint.out", &VariableType::randint_out_out);
- m.impl_UNBOXED("randint.generator_out", &VariableType::randint_out_generator_out);
- m.impl_UNBOXED("randint.low_out", &VariableType::randint_out_low_out);
- m.impl_UNBOXED("randint.low_generator_out", &VariableType::randint_out_low_generator_out);
- m.impl("randn",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::randn))
- );
- m.impl_UNBOXED("randn.generator", &VariableType::randn_generator);
- m.impl_UNBOXED("randn.names", &VariableType::randn_names);
- m.impl_UNBOXED("randn.generator_with_names", &VariableType::randn_generator_with_names);
- m.impl("randn_like",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::randn_like))
- );
- m.impl_UNBOXED("randn.out", &VariableType::randn_out_out);
- m.impl_UNBOXED("randn.generator_out", &VariableType::randn_out_generator_out);
m.impl_UNBOXED("random_.from", &VariableType::random__from);
m.impl_UNBOXED("random_.to", &VariableType::random__to);
m.impl_UNBOXED("random_", &VariableType::random_);
- m.impl("randperm",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::randperm))
- );
- m.impl_UNBOXED("randperm.generator", &VariableType::randperm_generator);
- m.impl_UNBOXED("randperm.out", &VariableType::randperm_out_out);
m.impl_UNBOXED("randperm.generator_out", &VariableType::randperm_out_generator_out);
- m.impl("range.step",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::range_step))
- );
- m.impl("range",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::range))
- );
m.impl_UNBOXED("range.out", &VariableType::range_out_out);
- m.impl("real",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::real))
- );
m.impl("reciprocal",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::reciprocal))
);
m.impl_UNBOXED("reciprocal_", &VariableType::reciprocal_);
m.impl_UNBOXED("reciprocal.out", &VariableType::reciprocal_out_out);
- m.impl_UNBOXED("refine_names", &VariableType::refine_names);
m.impl("reflection_pad1d",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::reflection_pad1d))
);
@@ -42274,8 +39446,6 @@
m.impl_UNBOXED("remainder_.Tensor", &VariableType::remainder__Tensor);
m.impl_UNBOXED("remainder.Scalar_out", &VariableType::remainder_out_Scalar_out);
m.impl_UNBOXED("remainder.Tensor_out", &VariableType::remainder_out_Tensor_out);
- m.impl_UNBOXED("rename", &VariableType::rename);
- m.impl_UNBOXED("rename_", &VariableType::rename_);
m.impl("renorm",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::renorm))
);
@@ -42287,12 +39457,6 @@
m.impl("repeat_interleave.Tensor",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::repeat_interleave_Tensor))
);
- m.impl("repeat_interleave.self_Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::repeat_interleave_self_Tensor))
- );
- m.impl("repeat_interleave.self_int",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::repeat_interleave_self_int))
- );
m.impl("replication_pad1d",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::replication_pad1d))
);
@@ -42317,41 +39481,6 @@
);
m.impl_UNBOXED("replication_pad3d_backward.grad_input", &VariableType::replication_pad3d_backward_out_grad_input);
m.impl_UNBOXED("replication_pad3d.out", &VariableType::replication_pad3d_out_out);
- m.impl("reshape",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::reshape))
- );
- m.impl("reshape_as",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::reshape_as))
- );
- m.impl("result_type.Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::result_type_Tensor))
- );
- m.impl("result_type.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::result_type_Scalar))
- );
- m.impl("result_type.Scalar_Tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::result_type_Scalar_Tensor))
- );
- m.impl("result_type.Scalar_Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::result_type_Scalar_Scalar))
- );
- m.impl("rfft",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::rfft))
- );
- m.impl("rnn_relu.input",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::rnn_relu_input))
- );
- m.impl("rnn_relu.data",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::rnn_relu_data))
- );
- m.impl_UNBOXED("rnn_relu_cell", &VariableType::rnn_relu_cell);
- m.impl("rnn_tanh.input",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::rnn_tanh_input))
- );
- m.impl("rnn_tanh.data",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::rnn_tanh_data))
- );
- m.impl_UNBOXED("rnn_tanh_cell", &VariableType::rnn_tanh_cell);
m.impl("roll",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::roll))
);
@@ -42363,8 +39492,6 @@
);
m.impl_UNBOXED("round_", &VariableType::round_);
m.impl_UNBOXED("round.out", &VariableType::round_out_out);
- m.impl_UNBOXED("rrelu", &VariableType::rrelu);
- m.impl_UNBOXED("rrelu_", &VariableType::rrelu_);
m.impl_UNBOXED("rrelu_with_noise", &VariableType::rrelu_with_noise);
m.impl_UNBOXED("rrelu_with_noise_", &VariableType::rrelu_with_noise_);
m.impl("rrelu_with_noise_backward",
@@ -42382,25 +39509,10 @@
m.impl("rsub.Scalar",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::rsub_Scalar))
);
- m.impl("scalar_tensor",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::scalar_tensor))
- );
- m.impl("scatter.src",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::scatter_src))
- );
- m.impl("scatter.value",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::scatter_value))
- );
- m.impl_UNBOXED("scatter.dimname_src", &VariableType::scatter_dimname_src);
- m.impl_UNBOXED("scatter.dimname_value", &VariableType::scatter_dimname_value);
m.impl_UNBOXED("scatter_.src", &VariableType::scatter__src);
m.impl_UNBOXED("scatter_.value", &VariableType::scatter__value);
m.impl_UNBOXED("scatter_.reduce", &VariableType::scatter__reduce);
m.impl_UNBOXED("scatter_.value_reduce", &VariableType::scatter__value_reduce);
- m.impl("scatter_add",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::scatter_add))
- );
- m.impl_UNBOXED("scatter_add.dimname", &VariableType::scatter_add_dimname);
m.impl_UNBOXED("scatter_add_", &VariableType::scatter_add_);
m.impl("searchsorted.Tensor",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::searchsorted_Tensor))
@@ -42409,14 +39521,9 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::searchsorted_Scalar))
);
m.impl_UNBOXED("searchsorted.Tensor_out", &VariableType::searchsorted_out_Tensor_out);
- m.impl_UNBOXED("select.Dimname", &VariableType::select_Dimname);
m.impl("select.int",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::select_int))
);
- m.impl("selu",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::selu))
- );
- m.impl_UNBOXED("selu_", &VariableType::selu_);
m.impl_UNBOXED("set_.source_Storage", &VariableType::set__source_Storage);
m.impl_UNBOXED("set_.source_Storage_storage_offset", &VariableType::set__source_Storage_storage_offset);
m.impl_UNBOXED("set_.source_Tensor", &VariableType::set__source_Tensor);
@@ -42436,17 +39543,11 @@
);
m.impl_UNBOXED("sign_", &VariableType::sign_);
m.impl_UNBOXED("sign.out", &VariableType::sign_out_out);
- m.impl("signbit",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::signbit))
- );
m.impl_UNBOXED("signbit.out", &VariableType::signbit_out_out);
m.impl("silu",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::silu))
);
m.impl_UNBOXED("silu_", &VariableType::silu_);
- m.impl("silu_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::silu_backward))
- );
m.impl_UNBOXED("silu.out", &VariableType::silu_out_out);
m.impl("sin",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sin))
@@ -42458,24 +39559,18 @@
);
m.impl_UNBOXED("sinh_", &VariableType::sinh_);
m.impl_UNBOXED("sinh.out", &VariableType::sinh_out_out);
- m.impl("size.int",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::size_int))
- );
- m.impl_UNBOXED("size.Dimname", &VariableType::size_Dimname);
m.impl("slice.Tensor",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::slice_Tensor))
);
m.impl("slogdet",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::slogdet))
);
- m.impl_UNBOXED("slow_conv3d", &VariableType::slow_conv3d);
m.impl("slow_conv3d_backward.output_mask",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::slow_conv3d_backward_output_mask))
);
m.impl_UNBOXED("slow_conv3d_backward.grad_input", &VariableType::slow_conv3d_backward_out_grad_input);
m.impl_UNBOXED("slow_conv3d_forward", &VariableType::slow_conv3d_forward);
m.impl_UNBOXED("slow_conv3d_forward.output", &VariableType::slow_conv3d_forward_out_output);
- m.impl_UNBOXED("slow_conv3d.out", &VariableType::slow_conv3d_out_out);
m.impl_UNBOXED("slow_conv_dilated2d", &VariableType::slow_conv_dilated2d);
m.impl("slow_conv_dilated2d_backward",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::slow_conv_dilated2d_backward))
@@ -42496,9 +39591,6 @@
);
m.impl_UNBOXED("slow_conv_transpose3d_backward.grad_output", &VariableType::slow_conv_transpose3d_backward_out_grad_output);
m.impl_UNBOXED("slow_conv_transpose3d.out", &VariableType::slow_conv_transpose3d_out_out);
- m.impl("smm",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::smm))
- );
m.impl("smooth_l1_loss",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::smooth_l1_loss))
);
@@ -42515,10 +39607,6 @@
);
m.impl_UNBOXED("soft_margin_loss_backward.grad_input", &VariableType::soft_margin_loss_backward_out_grad_input);
m.impl_UNBOXED("soft_margin_loss.out", &VariableType::soft_margin_loss_out_out);
- m.impl("softmax.int",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::softmax_int))
- );
- m.impl_UNBOXED("softmax.Dimname", &VariableType::softmax_Dimname);
m.impl("softplus",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::softplus))
);
@@ -42542,18 +39630,7 @@
m.impl("sort",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sort))
);
- m.impl_UNBOXED("sort.dimname", &VariableType::sort_dimname);
m.impl_UNBOXED("sort.values", &VariableType::sort_out_values);
- m.impl_UNBOXED("sort.dimname_values", &VariableType::sort_out_dimname_values);
- m.impl("sparse_coo_tensor.size",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sparse_coo_tensor_size))
- );
- m.impl("sparse_coo_tensor.indices",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sparse_coo_tensor_indices))
- );
- m.impl("sparse_coo_tensor.indices_size",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sparse_coo_tensor_indices_size))
- );
m.impl("sparse_dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sparse_dim))
);
@@ -42573,23 +39650,14 @@
);
m.impl_UNBOXED("sqrt_", &VariableType::sqrt_);
m.impl_UNBOXED("sqrt.out", &VariableType::sqrt_out_out);
- m.impl("square",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::square))
- );
- m.impl_UNBOXED("square_", &VariableType::square_);
m.impl("squeeze",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::squeeze))
);
m.impl("squeeze.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::squeeze_dim))
);
- m.impl_UNBOXED("squeeze.dimname", &VariableType::squeeze_dimname);
m.impl_UNBOXED("squeeze_", &VariableType::squeeze_);
m.impl_UNBOXED("squeeze_.dim", &VariableType::squeeze__dim);
- m.impl_UNBOXED("squeeze_.dimname", &VariableType::squeeze__dimname);
- m.impl("sspaddmm",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sspaddmm))
- );
m.impl_UNBOXED("sspaddmm.out", &VariableType::sspaddmm_out_out);
m.impl("stack",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::stack))
@@ -42601,21 +39669,13 @@
m.impl("std.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::std_dim))
);
- m.impl_UNBOXED("std.names_dim", &VariableType::std_names_dim);
m.impl("std_mean",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::std_mean))
);
m.impl("std_mean.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::std_mean_dim))
);
- m.impl_UNBOXED("std_mean.names_dim", &VariableType::std_mean_names_dim);
m.impl_UNBOXED("std.out", &VariableType::std_out_out);
- m.impl_UNBOXED("std.names_out", &VariableType::std_out_names_out);
- m.impl_UNBOXED("stft", &VariableType::stft);
- m.impl("stride.int",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::stride_int))
- );
- m.impl_UNBOXED("stride.Dimname", &VariableType::stride_Dimname);
m.impl("sub.Tensor",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sub_Tensor))
);
@@ -42631,12 +39691,7 @@
m.impl("sum.dim_IntList",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sum_dim_IntList))
);
- m.impl_UNBOXED("sum.dim_DimnameList", &VariableType::sum_dim_DimnameList);
m.impl_UNBOXED("sum.IntList_out", &VariableType::sum_out_IntList_out);
- m.impl_UNBOXED("sum.DimnameList_out", &VariableType::sum_out_DimnameList_out);
- m.impl("sum_to_size",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::sum_to_size))
- );
m.impl("svd",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::svd))
);
@@ -42667,25 +39722,18 @@
);
m.impl_UNBOXED("tanh_backward.grad_input", &VariableType::tanh_backward_out_grad_input);
m.impl_UNBOXED("tanh.out", &VariableType::tanh_out_out);
- m.impl("tensordot",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::tensordot))
- );
- m.impl_UNBOXED("thnn_conv2d", &VariableType::thnn_conv2d);
m.impl("thnn_conv2d_backward.output_mask",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::thnn_conv2d_backward_output_mask))
);
m.impl_UNBOXED("thnn_conv2d_backward.grad_input", &VariableType::thnn_conv2d_backward_out_grad_input);
m.impl_UNBOXED("thnn_conv2d_forward", &VariableType::thnn_conv2d_forward);
m.impl_UNBOXED("thnn_conv2d_forward.output", &VariableType::thnn_conv2d_forward_out_output);
- m.impl_UNBOXED("thnn_conv2d.out", &VariableType::thnn_conv2d_out_out);
- m.impl_UNBOXED("thnn_conv_depthwise2d", &VariableType::thnn_conv_depthwise2d);
m.impl("thnn_conv_depthwise2d_backward.output_mask",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::thnn_conv_depthwise2d_backward_output_mask))
);
m.impl_UNBOXED("thnn_conv_depthwise2d_backward.grad_input", &VariableType::thnn_conv_depthwise2d_backward_out_grad_input);
m.impl_UNBOXED("thnn_conv_depthwise2d_forward", &VariableType::thnn_conv_depthwise2d_forward);
m.impl_UNBOXED("thnn_conv_depthwise2d_forward.out", &VariableType::thnn_conv_depthwise2d_forward_out_out);
- m.impl_UNBOXED("thnn_conv_depthwise2d.out", &VariableType::thnn_conv_depthwise2d_out_out);
m.impl("threshold",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::threshold))
);
@@ -42694,30 +39742,12 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::threshold_backward))
);
m.impl_UNBOXED("threshold.out", &VariableType::threshold_out_out);
- m.impl("to.dtype_layout",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::to_dtype_layout))
- );
- m.impl("to.device",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::to_device))
- );
- m.impl("to.dtype",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::to_dtype))
- );
- m.impl("to.other",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::to_other))
- );
m.impl("to_dense",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::to_dense))
);
- m.impl("to_dense_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::to_dense_backward))
- );
m.impl("to_mkldnn",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::to_mkldnn))
);
- m.impl("to_mkldnn_backward",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::to_mkldnn_backward))
- );
m.impl("to_sparse.sparse_dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::to_sparse_sparse_dim))
);
@@ -42734,14 +39764,7 @@
m.impl("transpose.int",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::transpose_int))
);
- m.impl_UNBOXED("transpose.Dimname", &VariableType::transpose_Dimname);
m.impl_UNBOXED("transpose_", &VariableType::transpose_);
- m.impl("trapz.x",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::trapz_x))
- );
- m.impl("trapz.dx",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::trapz_dx))
- );
m.impl("triangular_solve",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::triangular_solve))
);
@@ -42754,9 +39777,6 @@
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::tril_indices))
);
m.impl_UNBOXED("tril.out", &VariableType::tril_out_out);
- m.impl("triplet_margin_loss",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::triplet_margin_loss))
- );
m.impl("triu",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::triu))
);
@@ -42779,15 +39799,9 @@
);
m.impl_UNBOXED("trunc_", &VariableType::trunc_);
m.impl_UNBOXED("trunc.out", &VariableType::trunc_out_out);
- m.impl("type_as",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::type_as))
- );
m.impl("unbind.int",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::unbind_int))
);
- m.impl_UNBOXED("unbind.Dimname", &VariableType::unbind_Dimname);
- m.impl_UNBOXED("unflatten.Dimname", &VariableType::unflatten_Dimname);
- m.impl_UNBOXED("unflatten.int", &VariableType::unflatten_int);
m.impl("unfold",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::unfold))
);
@@ -42804,9 +39818,6 @@
m.impl("unique_dim_consecutive",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::unique_dim_consecutive))
);
- m.impl("unsafe_chunk",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::unsafe_chunk))
- );
m.impl("unsafe_split.Tensor",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::unsafe_split_Tensor))
);
@@ -42890,61 +39901,29 @@
m.impl("values",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::values))
);
- m.impl("vander",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::vander))
- );
m.impl("var",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::var))
);
m.impl("var.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::var_dim))
);
- m.impl_UNBOXED("var.names_dim", &VariableType::var_names_dim);
m.impl("var_mean",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::var_mean))
);
m.impl("var_mean.dim",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::var_mean_dim))
);
- m.impl_UNBOXED("var_mean.names_dim", &VariableType::var_mean_names_dim);
m.impl_UNBOXED("var.out", &VariableType::var_out_out);
- m.impl_UNBOXED("var.names_out", &VariableType::var_out_names_out);
m.impl("view",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::view))
);
- m.impl("view_as",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::view_as))
- );
m.impl("view_as_complex",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::view_as_complex))
);
m.impl("view_as_real",
c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::view_as_real))
);
- m.impl("where.self",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::where_self))
- );
- m.impl("where.ScalarSelf",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::where_ScalarSelf))
- );
- m.impl("where.ScalarOther",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::where_ScalarOther))
- );
- m.impl("where.Scalar",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::where_Scalar))
- );
- m.impl("where",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::where))
- );
m.impl_UNBOXED("zero_", &VariableType::zero_);
- m.impl_UNBOXED("zeros.names", &VariableType::zeros_names);
- m.impl("zeros",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::zeros))
- );
- m.impl("zeros_like",
- c10::impl::hacky_wrapper_for_legacy_signatures(TORCH_FN(VariableType::zeros_like))
- );
- m.impl_UNBOXED("zeros.out", &VariableType::zeros_out_out);
}
}
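
// For context, a minimal sketch (not taken from the generated file) of the
// registration pattern these hunks delete: each removed m.impl /
// m.impl_UNBOXED line unregisters an operator kernel from a
// TORCH_LIBRARY_IMPL block, so the c10 dispatcher stops finding a
// VariableType kernel for that op and the call falls through to the kernel
// registered for the next matching dispatch key. The sketch uses a
// hypothetical custom operator (my_ns::mul2) to avoid colliding with aten
// registrations; TORCH_LIBRARY, TORCH_LIBRARY_IMPL, m.def, m.impl, and
// TORCH_FN are real PyTorch APIs of the 2020-era dispatcher.
#include <ATen/ATen.h>
#include <torch/library.h>

namespace {
// Plain CPU kernel. Autograd support would come from a separate kernel
// registered for the Autograd key, exactly like the VariableType kernels
// being registered (and unregistered) in the diff above.
at::Tensor mul2_cpu(const at::Tensor& self) {
  return self.mul(2);
}
} // namespace

// Declare the operator schema once per namespace...
TORCH_LIBRARY(my_ns, m) {
  m.def("mul2(Tensor self) -> Tensor");
}

// ...then register one kernel per dispatch key. Deleting an m.impl line
// from a block like this is what the hunks above do for the Autograd key:
// dispatch then falls through to a lower-priority registration instead.
TORCH_LIBRARY_IMPL(my_ns, CPU, m) {
  m.impl("mul2", TORCH_FN(mul2_cpu));
}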