Forked from dzhulgakov/gist:87050fcf7527730bea6483600f8c4897
Created September 19, 2019 17:31
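The patch below is entirely mechanical: it inserts a new `category_override: ''` field immediately after `use_c10_dispatcher` in every operator entry of the generated `Declarations.yaml`. In the real build this field would be emitted by the ATen code generator rather than patched into the output file, but as a rough illustration of what the diff does, here is a minimal sketch of applying the same default to an existing `Declarations.yaml` offline. The file path, the use of PyYAML, and the helper name are assumptions for illustration only, not part of the original change.

```python
# Hypothetical helper: add a default category_override field to every
# operator entry in an existing Declarations.yaml. This only illustrates
# the mechanical effect of the diff below; the actual field is produced
# by the ATen code generator, not by post-processing the YAML.
import yaml  # assumes PyYAML (>= 5.1 for sort_keys) is installed


def add_category_override(path="build/aten/src/ATen/Declarations.yaml"):
    with open(path) as f:
        declarations = yaml.safe_load(f)  # list of per-operator dicts

    for decl in declarations:
        # Add the new key with an empty-string default, matching the diff.
        # Note: setdefault appends the key at the end of each entry rather
        # than right after use_c10_dispatcher as in the hand-shown diff.
        decl.setdefault("category_override", "")

    with open(path, "w") as f:
        # sort_keys=False keeps the existing key order; the round-trip still
        # won't reproduce the original file byte-for-byte.
        yaml.safe_dump(declarations, f, default_flow_style=False, sort_keys=False)


if __name__ == "__main__":
    add_category_override()
```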
diff --git a/build/aten/src/ATen/Declarations.yaml b/build/aten/src/ATen/Declarations.yaml | |
index 0249f2d66b..3909d80a00 100644 | |
--- a/build/aten/src/ATen/Declarations.yaml | |
+++ b/build/aten/src/ATen/Declarations.yaml | |
@@ -2,6 +2,7 @@ | |
operator_name: _cast_Byte | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -37,6 +38,7 @@ | |
operator_name: _cast_Char | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -72,6 +74,7 @@ | |
operator_name: _cast_Double | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -107,6 +110,7 @@ | |
operator_name: _cast_Float | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -142,6 +146,7 @@ | |
operator_name: _cast_Int | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -177,6 +182,7 @@ | |
operator_name: _cast_Long | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -212,6 +218,7 @@ | |
operator_name: _cast_Short | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -247,6 +254,7 @@ | |
operator_name: _cast_Half | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -282,6 +290,7 @@ | |
operator_name: backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::backward(Tensor self, Tensor? gradient=None, bool keep_graph=False, bool create_graph=False) -> void | |
method_prefix_derived: '' | |
@@ -329,6 +338,7 @@ | |
operator_name: set_data | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::set_data(Tensor(a!) self, Tensor new_data) -> void | |
method_prefix_derived: '' | |
@@ -363,6 +373,7 @@ | |
operator_name: data | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::data(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -392,6 +403,7 @@ | |
operator_name: names_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::names_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -426,6 +438,7 @@ | |
operator_name: renamed | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::renamed(Tensor(a) self, Dimname[]? names) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -460,6 +473,7 @@ | |
operator_name: align_to | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::align_to(Tensor(a) self, DimnameList names) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -495,6 +509,7 @@ | |
operator_name: align_as | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::align_as(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -529,6 +544,7 @@ | |
operator_name: align_tensors | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::align_tensors(Tensor[] tensors) -> Tensor[] | |
method_prefix_derived: '' | |
@@ -558,6 +574,7 @@ | |
operator_name: refine_names | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::refine_names(Tensor(a) self, DimnameList names) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -592,6 +609,7 @@ | |
operator_name: unflatten | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unflatten(Tensor self, Dimname dim, int[] sizes, DimnameList names) -> Tensor | |
method_prefix_derived: '' | |
@@ -636,6 +654,7 @@ | |
operator_name: unflatten | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unflatten(Tensor self, int dim, int[] sizes, DimnameList names) -> Tensor | |
method_prefix_derived: '' | |
@@ -680,6 +699,7 @@ | |
operator_name: _cudnn_ctc_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -742,6 +762,7 @@ | |
operator_name: _cudnn_rnn_flatten_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, int input_size, int mode, int hidden_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor | |
method_prefix_derived: '' | |
@@ -806,6 +827,7 @@ | |
operator_name: _cudnn_rnn | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -917,6 +939,7 @@ | |
operator_name: _cudnn_rnn_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) | |
method_prefix_derived: '' | |
@@ -1055,6 +1078,7 @@ | |
operator_name: _cudnn_init_dropout_state | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -1100,6 +1124,7 @@ | |
operator_name: _debug_has_internal_overlap | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_debug_has_internal_overlap(Tensor self) -> int | |
method_prefix_derived: '' | |
@@ -1129,6 +1154,7 @@ | |
operator_name: _fused_dropout | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -1172,6 +1198,7 @@ | |
operator_name: _masked_scale | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor | |
method_prefix_derived: '' | |
@@ -1211,6 +1238,7 @@ | |
operator_name: _sobol_engine_draw | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -1268,6 +1296,7 @@ | |
operator_name: _sobol_engine_ff_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1317,6 +1346,7 @@ | |
operator_name: _sobol_engine_scramble_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1356,6 +1386,7 @@ | |
operator_name: _sobol_engine_initialize_state_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1390,6 +1421,7 @@ | |
operator_name: _reshape_from_tensor | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor | |
method_prefix_derived: '' | |
@@ -1424,6 +1456,7 @@ | |
operator_name: _shape_as_tensor | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_shape_as_tensor(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -1453,6 +1486,7 @@ | |
operator_name: dropout | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::dropout(Tensor input, float p, bool train) -> Tensor | |
method_prefix_derived: '' | |
@@ -1492,6 +1526,7 @@ | |
operator_name: dropout_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1531,6 +1566,7 @@ | |
operator_name: feature_dropout | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::feature_dropout(Tensor input, float p, bool train) -> Tensor | |
method_prefix_derived: '' | |
@@ -1570,6 +1606,7 @@ | |
operator_name: feature_dropout_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1609,6 +1646,7 @@ | |
operator_name: alpha_dropout | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor | |
method_prefix_derived: '' | |
@@ -1648,6 +1686,7 @@ | |
operator_name: alpha_dropout_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1687,6 +1726,7 @@ | |
operator_name: feature_alpha_dropout | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor | |
method_prefix_derived: '' | |
@@ -1726,6 +1766,7 @@ | |
operator_name: feature_alpha_dropout_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1765,6 +1806,7 @@ | |
operator_name: abs | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::abs(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -1795,6 +1837,7 @@ | |
operator_name: abs_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::abs_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1825,6 +1868,7 @@ | |
operator_name: abs | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1862,6 +1906,7 @@ | |
operator_name: acos | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::acos(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -1892,6 +1937,7 @@ | |
operator_name: acos_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::acos_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1922,6 +1968,7 @@ | |
operator_name: acos | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -1959,6 +2006,7 @@ | |
operator_name: avg_pool1d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor | |
method_prefix_derived: '' | |
@@ -2020,6 +2068,7 @@ | |
operator_name: adaptive_avg_pool1d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -2055,6 +2104,7 @@ | |
operator_name: adaptive_max_pool1d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -2093,6 +2143,7 @@ | |
operator_name: add | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -2135,6 +2186,7 @@ | |
operator_name: add_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -2176,6 +2228,7 @@ | |
operator_name: add | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -2225,6 +2278,7 @@ | |
operator_name: add | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -2266,6 +2320,7 @@ | |
operator_name: add_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -2306,6 +2361,7 @@ | |
operator_name: addmv | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -2360,6 +2416,7 @@ | |
operator_name: addmv_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -2414,6 +2471,7 @@ | |
operator_name: addmv | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -2475,6 +2533,7 @@ | |
operator_name: addr | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -2529,6 +2588,7 @@ | |
operator_name: addr_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -2582,6 +2642,7 @@ | |
operator_name: addr | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -2643,6 +2704,7 @@ | |
operator_name: affine_grid_generator | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -2682,6 +2744,7 @@ | |
operator_name: affine_grid_generator_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -2721,6 +2784,7 @@ | |
operator_name: all | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -2762,6 +2826,7 @@ | |
operator_name: all | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -2810,6 +2875,7 @@ | |
operator_name: allclose | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool | |
method_prefix_derived: '' | |
@@ -2863,6 +2929,7 @@ | |
operator_name: any | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -2904,6 +2971,7 @@ | |
operator_name: any | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -2952,6 +3020,7 @@ | |
operator_name: arange | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -2988,6 +3057,7 @@ | |
operator_name: arange | |
overload_name: start | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -3029,6 +3099,7 @@ | |
operator_name: arange | |
overload_name: start_step | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -3075,6 +3146,7 @@ | |
operator_name: arange | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3112,6 +3184,7 @@ | |
operator_name: arange | |
overload_name: start_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3160,6 +3233,7 @@ | |
operator_name: _dim_arange | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_dim_arange(Tensor like, int dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -3194,6 +3268,7 @@ | |
operator_name: argmax | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -3236,6 +3311,7 @@ | |
operator_name: argmin | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -3278,6 +3354,7 @@ | |
operator_name: as_strided | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -3324,6 +3401,7 @@ | |
operator_name: as_strided_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3370,6 +3448,7 @@ | |
operator_name: asin | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::asin(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -3400,6 +3479,7 @@ | |
operator_name: asin_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::asin_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3430,6 +3510,7 @@ | |
operator_name: asin | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3467,6 +3548,7 @@ | |
operator_name: atan | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::atan(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -3497,6 +3579,7 @@ | |
operator_name: atan_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::atan_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3527,6 +3610,7 @@ | |
operator_name: atan | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3564,6 +3648,7 @@ | |
operator_name: baddbmm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -3618,6 +3703,7 @@ | |
operator_name: baddbmm_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3671,6 +3757,7 @@ | |
operator_name: _baddbmm_mkl_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_baddbmm_mkl_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3724,6 +3811,7 @@ | |
operator_name: baddbmm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -3785,6 +3873,7 @@ | |
operator_name: bartlett_window | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -3821,6 +3910,7 @@ | |
operator_name: bartlett_window | |
overload_name: periodic | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -3862,6 +3952,7 @@ | |
operator_name: batch_norm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor | |
method_prefix_derived: '' | |
@@ -3931,6 +4022,7 @@ | |
operator_name: _batch_norm_impl_index | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, int) | |
method_prefix_derived: '' | |
@@ -4009,6 +4101,7 @@ | |
operator_name: _batch_norm_impl_index_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var_transform, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -4094,6 +4187,7 @@ | |
operator_name: bernoulli | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -4131,6 +4225,7 @@ | |
operator_name: bernoulli | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4175,6 +4270,7 @@ | |
operator_name: bernoulli_ | |
overload_name: Tensor | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4216,6 +4312,7 @@ | |
operator_name: bernoulli_ | |
overload_name: float | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4258,6 +4355,7 @@ | |
operator_name: bernoulli | |
overload_name: p | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -4300,6 +4398,7 @@ | |
operator_name: bilinear | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias) -> Tensor | |
method_prefix_derived: '' | |
@@ -4344,6 +4443,7 @@ | |
operator_name: binary_cross_entropy_with_logits | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -4396,6 +4496,7 @@ | |
operator_name: binary_cross_entropy_with_logits_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::binary_cross_entropy_with_logits_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -4453,6 +4554,7 @@ | |
operator_name: bincount | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -4495,6 +4597,7 @@ | |
operator_name: bitwise_not | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bitwise_not(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -4525,6 +4628,7 @@ | |
operator_name: bitwise_not_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4554,6 +4658,7 @@ | |
operator_name: bitwise_not | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4591,6 +4696,7 @@ | |
operator_name: logical_not | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logical_not(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -4621,6 +4727,7 @@ | |
operator_name: logical_not_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logical_not_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4650,6 +4757,7 @@ | |
operator_name: logical_not | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4687,6 +4795,7 @@ | |
operator_name: logical_xor | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logical_xor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -4722,6 +4831,7 @@ | |
operator_name: logical_xor_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4756,6 +4866,7 @@ | |
operator_name: logical_xor | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4798,6 +4909,7 @@ | |
operator_name: blackman_window | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -4834,6 +4946,7 @@ | |
operator_name: blackman_window | |
overload_name: periodic | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -4875,6 +4988,7 @@ | |
operator_name: bmm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bmm(Tensor self, Tensor mat2) -> Tensor | |
method_prefix_derived: '' | |
@@ -4910,6 +5024,7 @@ | |
operator_name: bmm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -4952,6 +5067,7 @@ | |
operator_name: broadcast_tensors | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::broadcast_tensors(Tensor[] tensors) -> Tensor[] | |
method_prefix_derived: '' | |
@@ -4981,6 +5097,7 @@ | |
operator_name: cat | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cat(Tensor[] tensors, int dim=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -5016,6 +5133,7 @@ | |
operator_name: cat | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5059,6 +5177,7 @@ | |
operator_name: cat | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -5093,6 +5212,7 @@ | |
operator_name: cat | |
overload_name: names_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5135,6 +5255,7 @@ | |
operator_name: ceil | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ceil(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -5165,6 +5286,7 @@ | |
operator_name: ceil_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ceil_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5195,6 +5317,7 @@ | |
operator_name: ceil | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5232,6 +5355,7 @@ | |
operator_name: chain_matmul | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::chain_matmul(Tensor[] matrices) -> Tensor | |
method_prefix_derived: '' | |
@@ -5261,6 +5385,7 @@ | |
operator_name: chunk | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::chunk(Tensor(a) self, int chunks, int dim=0) -> Tensor(a)[] | |
method_prefix_derived: '' | |
@@ -5302,6 +5427,7 @@ | |
operator_name: clamp | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -5344,6 +5470,7 @@ | |
operator_name: clamp_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5386,6 +5513,7 @@ | |
operator_name: clamp | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5435,6 +5563,7 @@ | |
operator_name: clamp_max | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clamp_max(Tensor self, Scalar max) -> Tensor | |
method_prefix_derived: '' | |
@@ -5470,6 +5599,7 @@ | |
operator_name: clamp_max_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5505,6 +5635,7 @@ | |
operator_name: clamp_max | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5547,6 +5678,7 @@ | |
operator_name: clamp_min | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clamp_min(Tensor self, Scalar min) -> Tensor | |
method_prefix_derived: '' | |
@@ -5582,6 +5714,7 @@ | |
operator_name: clamp_min_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5617,6 +5750,7 @@ | |
operator_name: clamp_min | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -5659,6 +5793,7 @@ | |
operator_name: cudnn_is_acceptable | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_is_acceptable(Tensor self) -> bool | |
method_prefix_derived: '' | |
@@ -5688,6 +5823,7 @@ | |
operator_name: constant_pad_nd | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -5728,6 +5864,7 @@ | |
operator_name: contiguous | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::contiguous(Tensor self, *, MemoryFormat memory_format=contiguous_format) -> Tensor | |
method_prefix_derived: '' | |
@@ -5764,6 +5901,7 @@ | |
operator_name: convolution | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor | |
method_prefix_derived: '' | |
@@ -5833,6 +5971,7 @@ | |
operator_name: convolution_overrideable | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor | |
method_prefix_derived: '' | |
@@ -5902,6 +6041,7 @@ | |
operator_name: convolution_backward_overrideable | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) | |
method_prefix_derived: '' | |
@@ -5985,6 +6125,7 @@ | |
operator_name: _convolution | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor | |
method_prefix_derived: '' | |
@@ -6069,6 +6210,7 @@ | |
operator_name: _convolution_nogroup | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_convolution_nogroup(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -6133,6 +6275,7 @@ | |
operator_name: _convolution_double_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -6243,6 +6386,7 @@ | |
operator_name: conv1d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -6310,6 +6454,7 @@ | |
operator_name: conv2d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -6377,6 +6522,7 @@ | |
operator_name: conv3d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -6444,6 +6590,7 @@ | |
operator_name: conv_tbc | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -6489,6 +6636,7 @@ | |
operator_name: conv_tbc_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -6544,6 +6692,7 @@ | |
operator_name: conv_transpose1d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -6618,6 +6767,7 @@ | |
operator_name: conv_transpose2d | |
overload_name: input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -6692,6 +6842,7 @@ | |
operator_name: conv_transpose3d | |
overload_name: input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -6766,6 +6917,7 @@ | |
operator_name: copy_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -6806,6 +6958,7 @@ | |
operator_name: _copy_from | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -6846,6 +6999,7 @@ | |
operator_name: cos | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cos(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -6876,6 +7030,7 @@ | |
operator_name: cos_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cos_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -6906,6 +7061,7 @@ | |
operator_name: cos | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -6943,6 +7099,7 @@ | |
operator_name: cosh | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cosh(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -6973,6 +7130,7 @@ | |
operator_name: cosh_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cosh_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -7003,6 +7161,7 @@ | |
operator_name: cosh | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -7040,6 +7199,7 @@ | |
operator_name: cosine_embedding_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -7091,6 +7251,7 @@ | |
operator_name: cudnn_affine_grid_generator | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid | |
method_prefix_derived: '' | |
@@ -7141,6 +7302,7 @@ | |
operator_name: cudnn_affine_grid_generator_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_affine_grid_generator_backward(Tensor grad, int N, int C, int H, int W) -> Tensor grad_theta | |
method_prefix_derived: '' | |
@@ -7191,6 +7353,7 @@ | |
operator_name: cudnn_batch_norm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -7261,6 +7424,7 @@ | |
operator_name: cudnn_batch_norm_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -7331,6 +7495,7 @@ | |
operator_name: cudnn_convolution | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -7400,6 +7565,7 @@ | |
operator_name: cudnn_convolution_backward_input | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -7469,6 +7635,7 @@ | |
operator_name: cudnn_convolution_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -7549,6 +7716,7 @@ | |
operator_name: cudnn_convolution_backward_bias | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution_backward_bias(Tensor grad_output) -> Tensor | |
method_prefix_derived: '' | |
@@ -7578,6 +7746,7 @@ | |
operator_name: cudnn_convolution_backward_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -7647,6 +7816,7 @@ | |
operator_name: cudnn_convolution_transpose | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -7721,6 +7891,7 @@ | |
operator_name: cudnn_convolution_transpose_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -7806,6 +7977,7 @@ | |
operator_name: cudnn_convolution_transpose_backward_bias | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution_transpose_backward_bias(Tensor grad_output) -> Tensor | |
method_prefix_derived: '' | |
@@ -7835,6 +8007,7 @@ | |
operator_name: cudnn_convolution_transpose_backward_input | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution_transpose_backward_input(Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -7899,6 +8072,7 @@ | |
operator_name: cudnn_convolution_transpose_backward_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_convolution_transpose_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -7968,6 +8142,7 @@ | |
operator_name: cudnn_grid_sampler | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_grid_sampler(Tensor self, Tensor grid) -> Tensor output | |
method_prefix_derived: '' | |
@@ -8003,6 +8178,7 @@ | |
operator_name: cudnn_grid_sampler_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid) | |
method_prefix_derived: '' | |
@@ -8047,6 +8223,7 @@ | |
operator_name: cumsum | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -8089,6 +8266,7 @@ | |
operator_name: cumsum | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -8138,6 +8316,7 @@ | |
operator_name: cumprod | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -8180,6 +8359,7 @@ | |
operator_name: cumprod | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -8229,6 +8409,7 @@ | |
operator_name: ctc_loss | |
overload_name: IntList | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -8291,6 +8472,7 @@ | |
operator_name: ctc_loss | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -8353,6 +8535,7 @@ | |
operator_name: _ctc_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -8412,6 +8595,7 @@ | |
operator_name: _ctc_loss_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -8482,6 +8666,7 @@ | |
operator_name: det | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::det(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -8512,6 +8697,7 @@ | |
operator_name: diag_embed | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor | |
method_prefix_derived: '' | |
@@ -8560,6 +8746,7 @@ | |
operator_name: diagflat | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::diagflat(Tensor self, int offset=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -8596,6 +8783,7 @@ | |
operator_name: diagonal | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -8644,6 +8832,7 @@ | |
operator_name: fill_diagonal_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -8684,6 +8873,7 @@ | |
operator_name: div | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::div.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -8719,6 +8909,7 @@ | |
operator_name: div_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -8753,6 +8944,7 @@ | |
operator_name: div | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -8795,6 +8987,7 @@ | |
operator_name: div | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::div.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -8830,6 +9023,7 @@ | |
operator_name: div_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -8864,6 +9058,7 @@ | |
operator_name: dot | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::dot(Tensor self, Tensor tensor) -> Tensor | |
method_prefix_derived: '' | |
@@ -8899,6 +9094,7 @@ | |
operator_name: dot | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -8941,6 +9137,7 @@ | |
operator_name: einsum | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::einsum(str equation, Tensor[] tensors) -> Tensor | |
method_prefix_derived: '' | |
@@ -8975,6 +9172,7 @@ | |
operator_name: embedding | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -9027,6 +9225,7 @@ | |
operator_name: embedding_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::embedding_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor | |
method_prefix_derived: '' | |
@@ -9081,6 +9280,7 @@ | |
operator_name: embedding_dense_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::embedding_dense_backward(Tensor grad_output, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor | |
method_prefix_derived: '' | |
@@ -9130,6 +9330,7 @@ | |
operator_name: embedding_renorm_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -9174,6 +9375,7 @@ | |
operator_name: embedding_sparse_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor | |
method_prefix_derived: '' | |
@@ -9223,6 +9425,7 @@ | |
operator_name: embedding_bag | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None) -> (Tensor, Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -9295,6 +9498,7 @@ | |
operator_name: _embedding_bag | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None) -> (Tensor, Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -9367,6 +9571,7 @@ | |
operator_name: _embedding_bag_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights) -> Tensor | |
method_prefix_derived: '' | |
@@ -9446,6 +9651,7 @@ | |
operator_name: _embedding_bag_sparse_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights) -> Tensor | |
method_prefix_derived: '' | |
@@ -9515,6 +9721,7 @@ | |
operator_name: _embedding_bag_dense_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights) -> Tensor | |
method_prefix_derived: '' | |
@@ -9589,6 +9796,7 @@ | |
operator_name: _embedding_bag_per_sample_weights_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode) -> Tensor | |
method_prefix_derived: '' | |
@@ -9643,6 +9851,7 @@ | |
operator_name: empty | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -9692,6 +9901,7 @@ | |
operator_name: empty | |
overload_name: memory_format | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -9735,6 +9945,7 @@ | |
operator_name: new_empty | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -9776,6 +9987,7 @@ | |
operator_name: new_full | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::new_full(Tensor self, int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -9822,6 +10034,7 @@ | |
operator_name: _empty_affine_quantized | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor | |
method_prefix_derived: '' | |
@@ -9875,32 +10088,36 @@ | |
device_guard: true | |
with_gil: false | |
deprecated: false | |
-- name: _empty_per_channel_affine_quantized_like | |
- operator_name: _empty_per_channel_affine_quantized_like | |
+- name: _empty_per_channel_affine_quantized | |
+ operator_name: _empty_per_channel_affine_quantized | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: factory | |
matches_jit_signature: true | |
- schema_string: aten::_empty_per_channel_affine_quantized_like(Tensor self, Tensor zero_points, int[] size, int[] axis, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor | |
+ schema_string: aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int[] axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor | |
method_prefix_derived: '' | |
arguments: | |
+ - annotation: null | |
+ dynamic_type: IntArrayRef | |
+ is_nullable: false | |
+ name: size | |
+ type: IntArrayRef | |
- annotation: null | |
dynamic_type: Tensor | |
is_nullable: false | |
- name: self | |
+ kwarg_only: true | |
+ name: scales | |
type: const Tensor & | |
- annotation: null | |
dynamic_type: Tensor | |
is_nullable: false | |
+ kwarg_only: true | |
name: zero_points | |
type: const Tensor & | |
- annotation: null | |
dynamic_type: IntArrayRef | |
is_nullable: false | |
- name: size | |
- type: IntArrayRef | |
- - annotation: null | |
- dynamic_type: IntArrayRef | |
- is_nullable: false | |
+ kwarg_only: true | |
name: axis | |
type: IntArrayRef | |
- annotation: null | |
@@ -9937,6 +10154,7 @@ | |
operator_name: resize_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::resize_(Tensor(a!) self, int[] size) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -9971,6 +10189,7 @@ | |
operator_name: empty | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10015,6 +10234,7 @@ | |
operator_name: empty_like | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::empty_like(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -10044,6 +10264,7 @@ | |
operator_name: empty_like | |
overload_name: dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::empty_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False, MemoryFormat? memory_format=contiguous_format) -> Tensor | |
method_prefix_derived: '' | |
@@ -10086,6 +10307,7 @@ | |
operator_name: empty_strided | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -10127,6 +10349,7 @@ | |
operator_name: erf | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::erf(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -10157,6 +10380,7 @@ | |
operator_name: erf_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::erf_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10187,6 +10411,7 @@ | |
operator_name: erf | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10224,6 +10449,7 @@ | |
operator_name: erfc | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::erfc(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -10254,6 +10480,7 @@ | |
operator_name: erfc_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::erfc_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10284,6 +10511,7 @@ | |
operator_name: erfc | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10321,6 +10549,7 @@ | |
operator_name: exp | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::exp(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -10351,6 +10580,7 @@ | |
operator_name: exp_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::exp_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10381,6 +10611,7 @@ | |
operator_name: exp | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10418,6 +10649,7 @@ | |
operator_name: expm1 | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::expm1(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -10448,6 +10680,7 @@ | |
operator_name: expm1_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::expm1_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10478,6 +10711,7 @@ | |
operator_name: expm1 | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10515,6 +10749,7 @@ | |
operator_name: expand | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -10556,6 +10791,7 @@ | |
operator_name: expand_as | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::expand_as(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -10590,6 +10826,7 @@ | |
operator_name: eye | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -10626,6 +10863,7 @@ | |
operator_name: eye | |
overload_name: m | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -10667,6 +10905,7 @@ | |
operator_name: eye | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10704,6 +10943,7 @@ | |
operator_name: eye | |
overload_name: m_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10746,6 +10986,7 @@ | |
operator_name: flatten | |
overload_name: using_ints | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::flatten.using_ints(Tensor self, int start_dim=0, int end_dim=-1) -> Tensor | |
method_prefix_derived: '' | |
@@ -10788,6 +11029,7 @@ | |
operator_name: flatten | |
overload_name: named_out_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::flatten.named_out_dim(Tensor self, int start_dim, int end_dim, Dimname out_dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -10833,6 +11075,7 @@ | |
operator_name: flatten | |
overload_name: using_names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::flatten.using_names(Tensor self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -10878,6 +11121,7 @@ | |
operator_name: flatten | |
overload_name: DimnameList | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::flatten.DimnameList(Tensor self, DimnameList dims, Dimname out_dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -10918,6 +11162,7 @@ | |
operator_name: fill_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10953,6 +11198,7 @@ | |
operator_name: fill_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -10988,6 +11234,7 @@ | |
operator_name: floor | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::floor(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -11018,6 +11265,7 @@ | |
operator_name: floor_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::floor_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -11048,6 +11296,7 @@ | |
operator_name: floor | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -11085,6 +11334,7 @@ | |
operator_name: frac | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::frac(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -11115,6 +11365,7 @@ | |
operator_name: frac_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::frac_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -11145,6 +11396,7 @@ | |
operator_name: frac | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -11182,6 +11434,7 @@ | |
operator_name: full | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -11229,6 +11482,7 @@ | |
operator_name: full | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -11270,6 +11524,7 @@ | |
operator_name: full | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -11312,6 +11567,7 @@ | |
operator_name: full_like | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::full_like(Tensor self, Scalar fill_value) -> Tensor | |
method_prefix_derived: '' | |
@@ -11346,6 +11602,7 @@ | |
operator_name: full_like | |
overload_name: dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::full_like.dtype(Tensor self, Scalar fill_value, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -11386,6 +11643,7 @@ | |
operator_name: from_file | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -11434,6 +11692,7 @@ | |
operator_name: grid_sampler | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -11483,6 +11742,7 @@ | |
operator_name: grid_sampler_2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -11532,6 +11792,7 @@ | |
operator_name: grid_sampler_2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -11589,6 +11850,7 @@ | |
operator_name: grid_sampler_3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -11638,6 +11900,7 @@ | |
operator_name: grid_sampler_3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -11695,6 +11958,7 @@ | |
operator_name: hann_window | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -11731,6 +11995,7 @@ | |
operator_name: hann_window | |
overload_name: periodic | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -11772,6 +12037,7 @@ | |
operator_name: hamming_window | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -11808,6 +12074,7 @@ | |
operator_name: hamming_window | |
overload_name: periodic | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -11849,6 +12116,7 @@ | |
operator_name: hamming_window | |
overload_name: periodic_alpha | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -11895,6 +12163,7 @@ | |
operator_name: hamming_window | |
overload_name: periodic_alpha_beta | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -11946,6 +12215,7 @@ | |
operator_name: hinge_embedding_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -11992,6 +12262,7 @@ | |
operator_name: ger | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ger(Tensor self, Tensor vec2) -> Tensor | |
method_prefix_derived: '' | |
@@ -12027,6 +12298,7 @@ | |
operator_name: ger | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -12069,6 +12341,7 @@ | |
operator_name: group_norm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor | |
method_prefix_derived: '' | |
@@ -12127,6 +12400,7 @@ | |
operator_name: fft | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -12168,6 +12442,7 @@ | |
operator_name: ifft | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ifft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -12209,6 +12484,7 @@ | |
operator_name: rfft | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True) -> Tensor | |
method_prefix_derived: '' | |
@@ -12256,6 +12532,7 @@ | |
operator_name: irfft | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::irfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True, int[] signal_sizes=[]) -> Tensor | |
method_prefix_derived: '' | |
@@ -12309,6 +12586,7 @@ | |
operator_name: _fft_with_size | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_fft_with_size(Tensor self, int signal_ndim, bool complex_input, bool complex_output, bool inverse, int[] checked_signal_sizes, bool normalized, bool onesided, int[] output_sizes) -> Tensor | |
method_prefix_derived: '' | |
@@ -12378,6 +12656,7 @@ | |
operator_name: _cufft_get_plan_cache_size | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cufft_get_plan_cache_size(int device_index) -> int | |
method_prefix_derived: '' | |
@@ -12407,6 +12686,7 @@ | |
operator_name: _cufft_get_plan_cache_max_size | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cufft_get_plan_cache_max_size(int device_index) -> int | |
method_prefix_derived: '' | |
@@ -12436,6 +12716,7 @@ | |
operator_name: _cufft_set_plan_cache_max_size | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> void | |
method_prefix_derived: '' | |
@@ -12470,6 +12751,7 @@ | |
operator_name: _cufft_clear_plan_cache | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cufft_clear_plan_cache(int device_index) -> void | |
method_prefix_derived: '' | |
@@ -12499,6 +12781,7 @@ | |
operator_name: index | |
overload_name: Tensor | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor | |
method_prefix_derived: '' | |
@@ -12534,6 +12817,7 @@ | |
operator_name: index_copy_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -12578,6 +12862,7 @@ | |
operator_name: index_copy | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor | |
method_prefix_derived: '' | |
@@ -12623,6 +12908,7 @@ | |
operator_name: index_put_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -12669,6 +12955,7 @@ | |
operator_name: index_put | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -12715,6 +13002,7 @@ | |
operator_name: _index_put_impl_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -12766,6 +13054,7 @@ | |
operator_name: instance_norm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor | |
method_prefix_derived: '' | |
@@ -12835,6 +13124,7 @@ | |
operator_name: inverse | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::inverse(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -12865,6 +13155,7 @@ | |
operator_name: inverse | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -12902,6 +13193,7 @@ | |
operator_name: _inverse_helper | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_inverse_helper(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -12931,6 +13223,7 @@ | |
operator_name: isclose | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -12984,6 +13277,7 @@ | |
operator_name: isnan | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::isnan(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -13013,6 +13307,7 @@ | |
operator_name: is_distributed | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::is_distributed(Tensor self) -> bool | |
method_prefix_derived: '' | |
@@ -13043,6 +13338,7 @@ | |
operator_name: is_floating_point | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::is_floating_point(Tensor self) -> bool | |
method_prefix_derived: '' | |
@@ -13073,6 +13369,7 @@ | |
operator_name: is_complex | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::is_complex(Tensor self) -> bool | |
method_prefix_derived: '' | |
@@ -13103,6 +13400,7 @@ | |
operator_name: is_nonzero | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::is_nonzero(Tensor self) -> bool | |
method_prefix_derived: '' | |
@@ -13133,6 +13431,7 @@ | |
operator_name: is_same_size | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::is_same_size(Tensor self, Tensor other) -> bool | |
method_prefix_derived: '' | |
@@ -13168,6 +13467,7 @@ | |
operator_name: is_signed | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::is_signed(Tensor self) -> bool | |
method_prefix_derived: '' | |
@@ -13198,6 +13498,7 @@ | |
operator_name: kl_div | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::kl_div(Tensor self, Tensor target, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -13238,6 +13539,7 @@ | |
operator_name: kl_div_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -13283,6 +13585,7 @@ | |
operator_name: kthvalue | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -13335,6 +13638,7 @@ | |
operator_name: kthvalue | |
overload_name: values | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) | |
method_prefix_derived: '' | |
@@ -13404,6 +13708,7 @@ | |
operator_name: layer_norm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor | |
method_prefix_derived: '' | |
@@ -13462,6 +13767,7 @@ | |
operator_name: native_layer_norm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::native_layer_norm(Tensor input, Tensor? weight, Tensor? bias, int M, int N, float eps) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -13522,6 +13828,7 @@ | |
operator_name: native_layer_norm_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::native_layer_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, int M, int N, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -13592,6 +13899,7 @@ | |
operator_name: native_layer_norm_double_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::native_layer_norm_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, int M, int N, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -13677,6 +13985,7 @@ | |
operator_name: linear | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -13717,6 +14026,7 @@ | |
operator_name: mkldnn_linear | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mkldnn_linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -13757,6 +14067,7 @@ | |
operator_name: fbgemm_linear_int8_weight_fp32_activation | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor | |
method_prefix_derived: '' | |
@@ -13816,6 +14127,7 @@ | |
operator_name: fbgemm_linear_int8_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor | |
method_prefix_derived: '' | |
@@ -13875,6 +14187,7 @@ | |
operator_name: fbgemm_linear_quantize_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int) | |
method_prefix_derived: '' | |
@@ -13913,6 +14226,7 @@ | |
operator_name: fbgemm_pack_gemm_matrix_fp16 | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor | |
method_prefix_derived: '' | |
@@ -13942,6 +14256,7 @@ | |
operator_name: fbgemm_linear_fp16_weight_fp32_activation | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor | |
method_prefix_derived: '' | |
@@ -13981,6 +14296,7 @@ | |
operator_name: fbgemm_linear_fp16_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor | |
method_prefix_derived: '' | |
@@ -14020,6 +14336,7 @@ | |
operator_name: fbgemm_pack_quantized_matrix | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor | |
method_prefix_derived: '' | |
@@ -14049,6 +14366,7 @@ | |
operator_name: fbgemm_pack_quantized_matrix | |
overload_name: KN | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor | |
method_prefix_derived: '' | |
@@ -14088,6 +14406,7 @@ | |
operator_name: fbgemm_is_cpu_supported | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fbgemm_is_cpu_supported() -> bool | |
method_prefix_derived: '' | |
@@ -14112,6 +14431,7 @@ | |
operator_name: linspace | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::linspace(Scalar start, Scalar end, int steps=100, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -14159,6 +14479,7 @@ | |
operator_name: linspace | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::linspace.out(Scalar start, Scalar end, int steps=100, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14207,6 +14528,7 @@ | |
operator_name: log | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -14237,6 +14559,7 @@ | |
operator_name: log_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14267,6 +14590,7 @@ | |
operator_name: log | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14304,6 +14628,7 @@ | |
operator_name: log10 | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log10(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -14334,6 +14659,7 @@ | |
operator_name: log10_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log10_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14364,6 +14690,7 @@ | |
operator_name: log10 | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14401,6 +14728,7 @@ | |
operator_name: log1p | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log1p(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -14431,6 +14759,7 @@ | |
operator_name: log1p_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log1p_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14461,6 +14790,7 @@ | |
operator_name: log1p | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14498,6 +14828,7 @@ | |
operator_name: log2 | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log2(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -14528,6 +14859,7 @@ | |
operator_name: log2_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log2_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14558,6 +14890,7 @@ | |
operator_name: log2 | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14595,6 +14928,7 @@ | |
operator_name: logdet | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logdet(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -14625,6 +14959,7 @@ | |
operator_name: logspace | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logspace(Scalar start, Scalar end, int steps=100, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -14678,6 +15013,7 @@ | |
operator_name: logspace | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logspace.out(Scalar start, Scalar end, int steps=100, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14732,6 +15068,7 @@ | |
operator_name: log_softmax | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -14773,6 +15110,7 @@ | |
operator_name: log_softmax | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_softmax(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -14815,6 +15153,7 @@ | |
operator_name: _log_softmax | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor | |
method_prefix_derived: '' | |
@@ -14854,6 +15193,7 @@ | |
operator_name: _log_softmax_backward_data | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -14898,6 +15238,7 @@ | |
operator_name: logsumexp | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -14940,6 +15281,7 @@ | |
operator_name: logsumexp | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -14989,6 +15331,7 @@ | |
operator_name: logsumexp | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -15031,6 +15374,7 @@ | |
operator_name: logsumexp | |
overload_name: names_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -15080,6 +15424,7 @@ | |
operator_name: margin_ranking_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -15131,6 +15476,7 @@ | |
operator_name: matmul | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::matmul(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -15166,6 +15512,7 @@ | |
operator_name: matmul | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -15208,6 +15555,7 @@ | |
operator_name: matrix_rank | |
overload_name: tol | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -15248,6 +15596,7 @@ | |
operator_name: matrix_rank | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::matrix_rank(Tensor self, bool symmetric=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -15283,6 +15632,7 @@ | |
operator_name: matrix_power | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::matrix_power(Tensor self, int n) -> Tensor | |
method_prefix_derived: '' | |
@@ -15318,6 +15668,7 @@ | |
operator_name: max | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -15364,6 +15715,7 @@ | |
operator_name: max | |
overload_name: dim_max | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) | |
method_prefix_derived: '' | |
@@ -15427,6 +15779,7 @@ | |
operator_name: max_values | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_values(Tensor self, int[1] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -15469,6 +15822,7 @@ | |
operator_name: max | |
overload_name: names_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -15515,6 +15869,7 @@ | |
operator_name: max | |
overload_name: names_dim_max | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) | |
method_prefix_derived: '' | |
@@ -15578,6 +15933,7 @@ | |
operator_name: max_values | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -15620,6 +15976,7 @@ | |
operator_name: max_pool1d_with_indices | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -15685,6 +16042,7 @@ | |
operator_name: max_pool1d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -15747,6 +16105,7 @@ | |
operator_name: max_pool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -15809,6 +16168,7 @@ | |
operator_name: mkldnn_max_pool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -15871,6 +16231,7 @@ | |
operator_name: quantized_max_pool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -15927,6 +16288,7 @@ | |
operator_name: max_pool3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -15989,6 +16351,7 @@ | |
operator_name: mean | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -16026,6 +16389,7 @@ | |
operator_name: mean | |
overload_name: dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -16075,6 +16439,7 @@ | |
operator_name: mean | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -16131,6 +16496,7 @@ | |
operator_name: mean | |
overload_name: names_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -16180,6 +16546,7 @@ | |
operator_name: mean | |
overload_name: names_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -16236,6 +16603,7 @@ | |
operator_name: median | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -16282,6 +16650,7 @@ | |
operator_name: median | |
overload_name: dim_values | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) | |
method_prefix_derived: '' | |
@@ -16345,6 +16714,7 @@ | |
operator_name: median | |
overload_name: names_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -16391,6 +16761,7 @@ | |
operator_name: median | |
overload_name: names_dim_values | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) | |
method_prefix_derived: '' | |
@@ -16454,6 +16825,7 @@ | |
operator_name: min | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -16500,6 +16872,7 @@ | |
operator_name: min | |
overload_name: dim_min | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) | |
method_prefix_derived: '' | |
@@ -16563,6 +16936,7 @@ | |
operator_name: min_values | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::min_values(Tensor self, int[1] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -16605,6 +16979,7 @@ | |
operator_name: min | |
overload_name: names_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -16651,6 +17026,7 @@ | |
operator_name: min | |
overload_name: names_dim_min | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) | |
method_prefix_derived: '' | |
@@ -16714,6 +17090,7 @@ | |
operator_name: min_values | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::min_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -16756,6 +17133,7 @@ | |
operator_name: mkldnn_convolution | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor | |
method_prefix_derived: '' | |
@@ -16815,6 +17193,7 @@ | |
operator_name: mkldnn_convolution_backward_input | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mkldnn_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> Tensor | |
method_prefix_derived: '' | |
@@ -16879,6 +17258,7 @@ | |
operator_name: mkldnn_convolution_backward_weights | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mkldnn_convolution_backward_weights(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -16946,6 +17326,7 @@ | |
operator_name: mkldnn_convolution_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mkldnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -17016,6 +17397,7 @@ | |
operator_name: miopen_batch_norm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -17086,6 +17468,7 @@ | |
operator_name: miopen_batch_norm_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -17156,6 +17539,7 @@ | |
operator_name: miopen_convolution | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -17225,6 +17609,7 @@ | |
operator_name: miopen_convolution_backward_input | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -17294,6 +17679,7 @@ | |
operator_name: miopen_convolution_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -17374,6 +17760,7 @@ | |
operator_name: miopen_convolution_backward_bias | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_convolution_backward_bias(Tensor grad_output) -> Tensor | |
method_prefix_derived: '' | |
@@ -17403,6 +17790,7 @@ | |
operator_name: miopen_convolution_backward_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -17472,6 +17860,7 @@ | |
operator_name: miopen_convolution_transpose | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -17546,6 +17935,7 @@ | |
operator_name: miopen_convolution_transpose_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -17631,6 +18021,7 @@ | |
operator_name: miopen_convolution_transpose_backward_input | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_convolution_transpose_backward_input(Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -17695,6 +18086,7 @@ | |
operator_name: miopen_convolution_transpose_backward_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_convolution_transpose_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -17764,6 +18156,7 @@ | |
operator_name: miopen_depthwise_convolution | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -17833,6 +18226,7 @@ | |
operator_name: miopen_depthwise_convolution_backward_input | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_depthwise_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -17902,6 +18296,7 @@ | |
operator_name: miopen_depthwise_convolution_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_depthwise_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -17982,6 +18377,7 @@ | |
operator_name: miopen_depthwise_convolution_backward_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_depthwise_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor | |
method_prefix_derived: '' | |
@@ -18051,6 +18447,7 @@ | |
operator_name: miopen_rnn | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -18157,6 +18554,7 @@ | |
operator_name: miopen_rnn_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) | |
method_prefix_derived: '' | |
@@ -18295,6 +18693,7 @@ | |
operator_name: mm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mm(Tensor self, Tensor mat2) -> Tensor | |
method_prefix_derived: '' | |
@@ -18330,6 +18729,7 @@ | |
operator_name: mm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -18372,6 +18772,7 @@ | |
operator_name: _sparse_mm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor | |
method_prefix_derived: '' | |
@@ -18406,6 +18807,7 @@ | |
operator_name: mode | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -18453,6 +18855,7 @@ | |
operator_name: mode | |
overload_name: values | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) | |
method_prefix_derived: '' | |
@@ -18517,6 +18920,7 @@ | |
operator_name: mul | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mul.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -18552,6 +18956,7 @@ | |
operator_name: mul_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -18586,6 +18991,7 @@ | |
operator_name: mul | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -18628,6 +19034,7 @@ | |
operator_name: mul | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mul.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -18663,6 +19070,7 @@ | |
operator_name: mul_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -18697,6 +19105,7 @@ | |
operator_name: mv | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mv(Tensor self, Tensor vec) -> Tensor | |
method_prefix_derived: '' | |
@@ -18732,6 +19141,7 @@ | |
operator_name: mv | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -18774,6 +19184,7 @@ | |
operator_name: mvlgamma | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mvlgamma(Tensor self, int p) -> Tensor | |
method_prefix_derived: '' | |
@@ -18809,6 +19220,7 @@ | |
operator_name: mvlgamma_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -18843,6 +19255,7 @@ | |
operator_name: narrow_copy | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::narrow_copy(Tensor self, int dim, int start, int length) -> Tensor | |
method_prefix_derived: '' | |
@@ -18887,6 +19300,7 @@ | |
operator_name: narrow | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -18932,6 +19346,7 @@ | |
operator_name: native_batch_norm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -19002,6 +19417,7 @@ | |
operator_name: batch_norm_stats | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -19039,6 +19455,7 @@ | |
operator_name: batch_norm_elemt | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> Tensor | |
method_prefix_derived: '' | |
@@ -19093,6 +19510,7 @@ | |
operator_name: batch_norm_gather_stats | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -19160,6 +19578,7 @@ | |
operator_name: batch_norm_gather_stats_with_counts | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int[] counts) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -19227,6 +19646,7 @@ | |
operator_name: native_batch_norm_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -19307,6 +19727,7 @@ | |
operator_name: batch_norm_backward_reduce | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -19380,6 +19801,7 @@ | |
operator_name: batch_norm_backward_elemt | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu) -> Tensor | |
method_prefix_derived: '' | |
@@ -19439,6 +19861,7 @@ | |
operator_name: batch_norm_update_stats | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -19486,6 +19909,7 @@ | |
operator_name: _nnpack_available | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_nnpack_available() -> bool | |
method_prefix_derived: '' | |
@@ -19510,6 +19934,7 @@ | |
operator_name: _nnpack_spatial_convolution | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -19555,6 +19980,7 @@ | |
operator_name: _nnpack_spatial_convolution_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_nnpack_spatial_convolution_backward(Tensor input, Tensor grad_output, Tensor weight, int[2] padding, bool[3] output_mask) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -19611,6 +20037,7 @@ | |
operator_name: _nnpack_spatial_convolution_backward_input | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_nnpack_spatial_convolution_backward_input(Tensor input, Tensor grad_output, Tensor weight, int[2] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -19656,6 +20083,7 @@ | |
operator_name: _nnpack_spatial_convolution_backward_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_nnpack_spatial_convolution_backward_weight(Tensor input, int[] weightsize, Tensor grad_output, int[2] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -19701,6 +20129,7 @@ | |
operator_name: ones | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -19743,6 +20172,7 @@ | |
operator_name: ones | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ones(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -19779,6 +20209,7 @@ | |
operator_name: ones | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -19816,6 +20247,7 @@ | |
operator_name: ones_like | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ones_like(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -19845,6 +20277,7 @@ | |
operator_name: ones_like | |
overload_name: dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ones_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -19880,6 +20313,7 @@ | |
operator_name: pairwise_distance | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -19932,6 +20366,7 @@ | |
operator_name: cdist | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cdist(Tensor x1, Tensor x2, float p=2) -> Tensor | |
method_prefix_derived: '' | |
@@ -19972,6 +20407,7 @@ | |
operator_name: _cdist_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor | |
method_prefix_derived: '' | |
@@ -20021,6 +20457,7 @@ | |
operator_name: pdist | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pdist(Tensor self, float p=2) -> Tensor | |
method_prefix_derived: '' | |
@@ -20056,6 +20493,7 @@ | |
operator_name: _pdist_forward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_pdist_forward(Tensor self, float p=2) -> Tensor | |
method_prefix_derived: '' | |
@@ -20091,6 +20529,7 @@ | |
operator_name: _pdist_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor | |
method_prefix_derived: '' | |
@@ -20135,6 +20574,7 @@ | |
operator_name: cosine_similarity | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor | |
method_prefix_derived: '' | |
@@ -20181,6 +20621,7 @@ | |
operator_name: permute | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::permute(Tensor(a) self, int[] dims) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -20215,6 +20656,7 @@ | |
operator_name: numpy_T | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::numpy_T(Tensor(a) self) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -20244,6 +20686,7 @@ | |
operator_name: pixel_shuffle | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor | |
method_prefix_derived: '' | |
@@ -20278,6 +20721,7 @@ | |
operator_name: is_pinned | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::is_pinned(Tensor self) -> bool | |
method_prefix_derived: '' | |
@@ -20307,6 +20751,7 @@ | |
operator_name: pin_memory | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pin_memory(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -20336,6 +20781,7 @@ | |
operator_name: pinverse | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor | |
method_prefix_derived: '' | |
@@ -20372,6 +20818,7 @@ | |
operator_name: poisson_nll_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor | |
method_prefix_derived: '' | |
@@ -20426,6 +20873,7 @@ | |
operator_name: scalar_tensor | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -20462,6 +20910,7 @@ | |
operator_name: rand | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -20504,6 +20953,7 @@ | |
operator_name: rand | |
overload_name: generator_with_names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -20552,6 +21002,7 @@ | |
operator_name: rand | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -20588,6 +21039,7 @@ | |
operator_name: rand | |
overload_name: generator | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -20630,6 +21082,7 @@ | |
operator_name: rand | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -20667,6 +21120,7 @@ | |
operator_name: rand | |
overload_name: generator_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -20710,6 +21164,7 @@ | |
operator_name: rand_like | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rand_like(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -20739,6 +21194,7 @@ | |
operator_name: rand_like | |
overload_name: dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rand_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -20774,6 +21230,7 @@ | |
operator_name: randint | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -20815,6 +21272,7 @@ | |
operator_name: randint | |
overload_name: generator | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -20862,6 +21320,7 @@ | |
operator_name: randint | |
overload_name: low | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint.low(int low, int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -20908,6 +21367,7 @@ | |
operator_name: randint | |
overload_name: low_generator | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -20960,6 +21420,7 @@ | |
operator_name: randint | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21002,6 +21463,7 @@ | |
operator_name: randint | |
overload_name: generator_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21050,6 +21512,7 @@ | |
operator_name: randint | |
overload_name: low_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21097,6 +21560,7 @@ | |
operator_name: randint | |
overload_name: low_generator_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21150,6 +21614,7 @@ | |
operator_name: randint_like | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint_like(Tensor self, int high) -> Tensor | |
method_prefix_derived: '' | |
@@ -21184,6 +21649,7 @@ | |
operator_name: randint_like | |
overload_name: low | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint_like.low(Tensor self, int low, int high) -> Tensor | |
method_prefix_derived: '' | |
@@ -21223,6 +21689,7 @@ | |
operator_name: randint_like | |
overload_name: dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint_like.dtype(Tensor self, int high, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -21263,6 +21730,7 @@ | |
operator_name: randint_like | |
overload_name: low_dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -21308,6 +21776,7 @@ | |
operator_name: randn | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -21344,6 +21813,7 @@ | |
operator_name: randn | |
overload_name: generator | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -21386,6 +21856,7 @@ | |
operator_name: randn | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -21428,6 +21899,7 @@ | |
operator_name: randn | |
overload_name: generator_with_names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -21476,6 +21948,7 @@ | |
operator_name: randn | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21513,6 +21986,7 @@ | |
operator_name: randn | |
overload_name: generator_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21556,6 +22030,7 @@ | |
operator_name: randn_like | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randn_like(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -21585,6 +22060,7 @@ | |
operator_name: randn_like | |
overload_name: dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randn_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -21620,6 +22096,7 @@ | |
operator_name: randperm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randperm(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -21656,6 +22133,7 @@ | |
operator_name: randperm | |
overload_name: generator | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -21698,6 +22176,7 @@ | |
operator_name: randperm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21735,6 +22214,7 @@ | |
operator_name: randperm | |
overload_name: generator_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21778,6 +22258,7 @@ | |
operator_name: range | |
overload_name: step | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -21825,6 +22306,7 @@ | |
operator_name: range | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -21866,6 +22348,7 @@ | |
operator_name: range | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21914,6 +22397,7 @@ | |
operator_name: reciprocal | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reciprocal(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -21944,6 +22428,7 @@ | |
operator_name: reciprocal_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reciprocal_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -21974,6 +22459,7 @@ | |
operator_name: reciprocal | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -22011,6 +22497,7 @@ | |
operator_name: neg | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::neg(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -22041,6 +22528,7 @@ | |
operator_name: neg_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::neg_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -22071,6 +22559,7 @@ | |
operator_name: neg | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -22108,6 +22597,7 @@ | |
operator_name: repeat | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::repeat(Tensor self, int[] repeats) -> Tensor | |
method_prefix_derived: '' | |
@@ -22142,6 +22632,7 @@ | |
operator_name: repeat_interleave | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::repeat_interleave.Tensor(Tensor repeats) -> Tensor | |
method_prefix_derived: '' | |
@@ -22171,6 +22662,7 @@ | |
operator_name: repeat_interleave | |
overload_name: self_Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -22212,6 +22704,7 @@ | |
operator_name: repeat_interleave | |
overload_name: self_int | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::repeat_interleave.self_int(Tensor self, int repeats, int? dim=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -22253,6 +22746,7 @@ | |
operator_name: reshape | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reshape(Tensor self, int[] shape) -> Tensor | |
method_prefix_derived: '' | |
@@ -22288,6 +22782,7 @@ | |
operator_name: _mkldnn_reshape | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_mkldnn_reshape(Tensor self, int[] shape) -> Tensor | |
method_prefix_derived: '' | |
@@ -22322,6 +22817,7 @@ | |
operator_name: reshape_as | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reshape_as(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -22356,6 +22852,7 @@ | |
operator_name: round | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::round(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -22386,6 +22883,7 @@ | |
operator_name: round_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::round_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -22416,6 +22914,7 @@ | |
operator_name: round | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -22453,6 +22952,7 @@ | |
operator_name: rrelu | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -22506,6 +23006,7 @@ | |
operator_name: rrelu_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -22559,6 +23060,7 @@ | |
operator_name: relu | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::relu(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -22589,6 +23091,7 @@ | |
operator_name: relu_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::relu_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -22619,6 +23122,7 @@ | |
operator_name: prelu | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::prelu(Tensor self, Tensor weight) -> Tensor | |
method_prefix_derived: '' | |
@@ -22654,6 +23158,7 @@ | |
operator_name: prelu_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -22697,6 +23202,7 @@ | |
operator_name: gelu | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gelu(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -22726,6 +23232,7 @@ | |
operator_name: gelu_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gelu_backward(Tensor grad, Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -22760,6 +23267,7 @@ | |
operator_name: hardshrink | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor | |
method_prefix_derived: '' | |
@@ -22796,6 +23304,7 @@ | |
operator_name: hardshrink_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor | |
method_prefix_derived: '' | |
@@ -22836,6 +23345,7 @@ | |
operator_name: rsqrt | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rsqrt(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -22866,6 +23376,7 @@ | |
operator_name: rsqrt_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rsqrt_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -22896,6 +23407,7 @@ | |
operator_name: rsqrt | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -22933,6 +23445,7 @@ | |
operator_name: select | |
overload_name: Dimname | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -22973,6 +23486,7 @@ | |
operator_name: select | |
overload_name: int | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -23013,6 +23527,7 @@ | |
operator_name: selu | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::selu(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -23042,6 +23557,7 @@ | |
operator_name: selu_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::selu_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23071,6 +23587,7 @@ | |
operator_name: celu | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor | |
method_prefix_derived: '' | |
@@ -23106,6 +23623,7 @@ | |
operator_name: celu_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23141,6 +23659,7 @@ | |
operator_name: sigmoid | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sigmoid(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -23171,6 +23690,7 @@ | |
operator_name: sigmoid_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sigmoid_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23201,6 +23721,7 @@ | |
operator_name: sigmoid | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23238,6 +23759,7 @@ | |
operator_name: sin | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sin(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -23268,6 +23790,7 @@ | |
operator_name: sin_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sin_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23298,6 +23821,7 @@ | |
operator_name: sin | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23335,6 +23859,7 @@ | |
operator_name: sinh | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sinh(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -23365,6 +23890,7 @@ | |
operator_name: sinh_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sinh_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23395,6 +23921,7 @@ | |
operator_name: sinh | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23432,6 +23959,7 @@ | |
operator_name: detach | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::detach(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -23462,6 +23990,7 @@ | |
operator_name: detach_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::detach_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23492,6 +24021,7 @@ | |
operator_name: size | |
overload_name: int | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::size.int(Tensor self, int dim) -> int | |
method_prefix_derived: '' | |
@@ -23527,6 +24057,7 @@ | |
operator_name: size | |
overload_name: Dimname | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::size.Dimname(Tensor self, Dimname dim) -> int | |
method_prefix_derived: '' | |
@@ -23562,6 +24093,7 @@ | |
operator_name: slice | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slice.Tensor(Tensor(a) self, int dim=0, int start=0, int end=9223372036854775807, int step=1) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -23616,6 +24148,7 @@ | |
operator_name: slogdet | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) | |
method_prefix_derived: '' | |
@@ -23651,6 +24184,7 @@ | |
operator_name: smm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::smm(Tensor self, Tensor mat2) -> Tensor | |
method_prefix_derived: '' | |
@@ -23686,6 +24220,7 @@ | |
operator_name: softmax | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -23727,6 +24262,7 @@ | |
operator_name: softmax | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softmax(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -23769,6 +24305,7 @@ | |
operator_name: _softmax | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor | |
method_prefix_derived: '' | |
@@ -23808,6 +24345,7 @@ | |
operator_name: _softmax_backward_data | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -23852,6 +24390,7 @@ | |
operator_name: _sparse_add | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23901,6 +24440,7 @@ | |
operator_name: _sparse_dense_add | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_dense_add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23950,6 +24490,7 @@ | |
operator_name: _sparse_div_zerodim | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_div_zerodim.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -23992,6 +24533,7 @@ | |
operator_name: _sparse_div_scalar | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_div_scalar.out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24034,6 +24576,7 @@ | |
operator_name: _sparse_mul | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24076,6 +24619,7 @@ | |
operator_name: _sparse_mul_zerodim | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_mul_zerodim.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24118,6 +24662,7 @@ | |
operator_name: _sparse_mul_scalar | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_mul_scalar.out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24160,6 +24705,7 @@ | |
operator_name: split | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::split.Tensor(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[] | |
method_prefix_derived: '' | |
@@ -24201,6 +24747,7 @@ | |
operator_name: split_with_sizes | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[] | |
method_prefix_derived: '' | |
@@ -24242,6 +24789,7 @@ | |
operator_name: squeeze | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::squeeze(Tensor(a) self) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -24272,6 +24820,7 @@ | |
operator_name: squeeze | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -24307,6 +24856,7 @@ | |
operator_name: squeeze_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::squeeze_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24336,6 +24886,7 @@ | |
operator_name: squeeze_ | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24370,6 +24921,7 @@ | |
operator_name: sspaddmm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -24424,6 +24976,7 @@ | |
operator_name: sspaddmm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24485,6 +25038,7 @@ | |
operator_name: stack | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::stack(Tensor[] tensors, int dim=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -24520,6 +25074,7 @@ | |
operator_name: stack | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24563,6 +25118,7 @@ | |
operator_name: stft | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool onesided=True) -> Tensor | |
method_prefix_derived: '' | |
@@ -24628,6 +25184,7 @@ | |
operator_name: stride | |
overload_name: int | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::stride.int(Tensor self, int dim) -> int | |
method_prefix_derived: '' | |
@@ -24663,6 +25220,7 @@ | |
operator_name: stride | |
overload_name: Dimname | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::stride.Dimname(Tensor self, Dimname dim) -> int | |
method_prefix_derived: '' | |
@@ -24698,6 +25256,7 @@ | |
operator_name: sum | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -24735,6 +25294,7 @@ | |
operator_name: sum | |
overload_name: dim_IntList | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -24784,6 +25344,7 @@ | |
operator_name: sum | |
overload_name: dim_DimnameList | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -24833,6 +25394,7 @@ | |
operator_name: sum | |
overload_name: IntList_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24889,6 +25451,7 @@ | |
operator_name: sum | |
overload_name: DimnameList_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -24945,6 +25508,7 @@ | |
operator_name: sum_to_size | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sum_to_size(Tensor self, int[] size) -> Tensor | |
method_prefix_derived: '' | |
@@ -24979,6 +25543,7 @@ | |
operator_name: sqrt | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sqrt(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -25009,6 +25574,7 @@ | |
operator_name: sqrt_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sqrt_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25039,6 +25605,7 @@ | |
operator_name: sqrt | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25076,6 +25643,7 @@ | |
operator_name: std | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::std(Tensor self, bool unbiased=True) -> Tensor | |
method_prefix_derived: '' | |
@@ -25112,6 +25680,7 @@ | |
operator_name: std | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -25160,6 +25729,7 @@ | |
operator_name: std_mean | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -25198,6 +25768,7 @@ | |
operator_name: std_mean | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::std_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -25248,6 +25819,7 @@ | |
operator_name: std_mean | |
overload_name: names_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -25298,6 +25870,7 @@ | |
operator_name: std | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::std.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25353,6 +25926,7 @@ | |
operator_name: std | |
overload_name: names_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -25401,6 +25975,7 @@ | |
operator_name: std | |
overload_name: names_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25456,6 +26031,7 @@ | |
operator_name: prod | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -25493,6 +26069,7 @@ | |
operator_name: prod | |
overload_name: dim_int | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -25541,6 +26118,7 @@ | |
operator_name: prod | |
overload_name: int_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25596,6 +26174,7 @@ | |
operator_name: prod | |
overload_name: dim_Dimname | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -25644,6 +26223,7 @@ | |
operator_name: prod | |
overload_name: Dimname_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25699,6 +26279,7 @@ | |
operator_name: t | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::t(Tensor(a) self) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -25729,6 +26310,7 @@ | |
operator_name: t_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::t_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25758,6 +26340,7 @@ | |
operator_name: tan | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tan(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -25788,6 +26371,7 @@ | |
operator_name: tan_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tan_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25818,6 +26402,7 @@ | |
operator_name: tan | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25855,6 +26440,7 @@ | |
operator_name: tanh | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tanh(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -25885,6 +26471,7 @@ | |
operator_name: tanh_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tanh_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25915,6 +26502,7 @@ | |
operator_name: tanh | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -25952,6 +26540,7 @@ | |
operator_name: tensordot | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor | |
method_prefix_derived: '' | |
@@ -25996,6 +26585,7 @@ | |
operator_name: threshold | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor | |
method_prefix_derived: '' | |
@@ -26035,6 +26625,7 @@ | |
operator_name: threshold_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -26074,6 +26665,7 @@ | |
operator_name: threshold | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -26121,6 +26713,7 @@ | |
operator_name: threshold_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor | |
method_prefix_derived: '' | |
@@ -26160,6 +26753,7 @@ | |
operator_name: transpose | |
overload_name: int | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -26200,6 +26794,7 @@ | |
operator_name: transpose | |
overload_name: Dimname | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -26240,6 +26835,7 @@ | |
operator_name: _mkldnn_transpose | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor | |
method_prefix_derived: '' | |
@@ -26279,6 +26875,7 @@ | |
operator_name: transpose_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -26318,6 +26915,7 @@ | |
operator_name: _mkldnn_transpose_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -26357,6 +26955,7 @@ | |
operator_name: one_hot | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::one_hot(Tensor self, int num_classes=-1) -> Tensor | |
method_prefix_derived: '' | |
@@ -26392,6 +26991,7 @@ | |
operator_name: flip | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::flip(Tensor self, int[] dims) -> Tensor | |
method_prefix_derived: '' | |
@@ -26427,6 +27027,7 @@ | |
operator_name: roll | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor | |
method_prefix_derived: '' | |
@@ -26470,6 +27071,7 @@ | |
operator_name: rot90 | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor | |
method_prefix_derived: '' | |
@@ -26512,6 +27114,7 @@ | |
operator_name: trapz | |
overload_name: x | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor | |
method_prefix_derived: '' | |
@@ -26553,6 +27156,7 @@ | |
operator_name: trapz | |
overload_name: dx | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor | |
method_prefix_derived: '' | |
@@ -26596,6 +27200,7 @@ | |
operator_name: _trilinear | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -26661,6 +27266,7 @@ | |
operator_name: triplet_margin_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -26730,6 +27336,7 @@ | |
operator_name: trunc | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::trunc(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -26760,6 +27367,7 @@ | |
operator_name: trunc_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::trunc_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -26790,6 +27398,7 @@ | |
operator_name: trunc | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -26827,6 +27436,7 @@ | |
operator_name: type_as | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::type_as(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -26861,6 +27471,7 @@ | |
operator_name: _has_compatible_shallow_copy_type | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool | |
method_prefix_derived: '' | |
@@ -26895,6 +27506,7 @@ | |
operator_name: _unique | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -26939,6 +27551,7 @@ | |
operator_name: unique_dim | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -26997,6 +27610,7 @@ | |
operator_name: unique_consecutive | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -27050,6 +27664,7 @@ | |
operator_name: unique_dim_consecutive | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -27102,6 +27717,7 @@ | |
operator_name: _unique2 | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -27155,6 +27771,7 @@ | |
operator_name: _unsafe_view | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_unsafe_view(Tensor self, int[] size) -> Tensor | |
method_prefix_derived: '' | |
@@ -27189,6 +27806,7 @@ | |
operator_name: unsqueeze | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -27224,6 +27842,7 @@ | |
operator_name: unsqueeze_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -27258,6 +27877,7 @@ | |
operator_name: var | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::var(Tensor self, bool unbiased=True) -> Tensor | |
method_prefix_derived: '' | |
@@ -27294,6 +27914,7 @@ | |
operator_name: var | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::var.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -27342,6 +27963,7 @@ | |
operator_name: var | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::var.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -27397,6 +28019,7 @@ | |
operator_name: var | |
overload_name: names_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -27445,6 +28068,7 @@ | |
operator_name: var | |
overload_name: names_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -27500,6 +28124,7 @@ | |
operator_name: var_mean | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -27538,6 +28163,7 @@ | |
operator_name: var_mean | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::var_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -27588,6 +28214,7 @@ | |
operator_name: var_mean | |
overload_name: names_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -27638,6 +28265,7 @@ | |
operator_name: view_as | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::view_as(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -27672,6 +28300,7 @@ | |
operator_name: where | |
overload_name: self | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -27712,6 +28341,7 @@ | |
operator_name: where | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::where(Tensor condition) -> Tensor[] | |
method_prefix_derived: '' | |
@@ -27741,6 +28371,7 @@ | |
operator_name: _s_where | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_s_where(Tensor condition, Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -27780,6 +28411,7 @@ | |
operator_name: norm_except_dim | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -27821,6 +28453,7 @@ | |
operator_name: _weight_norm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -27861,6 +28494,7 @@ | |
operator_name: _weight_norm_cuda_interface | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_weight_norm_cuda_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -27904,6 +28538,7 @@ | |
operator_name: _weight_norm_cuda_interface_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_weight_norm_cuda_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -27956,6 +28591,7 @@ | |
operator_name: _weight_norm_differentiable_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -28008,6 +28644,7 @@ | |
operator_name: zeros | |
overload_name: names | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -28050,6 +28687,7 @@ | |
operator_name: zeros | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::zeros(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -28086,6 +28724,7 @@ | |
operator_name: zeros | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -28123,6 +28762,7 @@ | |
operator_name: zeros_like | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::zeros_like(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -28152,6 +28792,7 @@ | |
operator_name: zeros_like | |
overload_name: dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::zeros_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -28187,6 +28828,7 @@ | |
operator_name: _standard_gamma_grad | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor | |
method_prefix_derived: '' | |
@@ -28221,6 +28863,7 @@ | |
operator_name: _standard_gamma | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_standard_gamma(Tensor self, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -28256,6 +28899,7 @@ | |
operator_name: _dirichlet_grad | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor | |
method_prefix_derived: '' | |
@@ -28295,6 +28939,7 @@ | |
operator_name: _sample_dirichlet | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -28330,6 +28975,7 @@ | |
operator_name: poisson | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::poisson(Tensor self, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -28365,6 +29011,7 @@ | |
operator_name: native_norm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::native_norm(Tensor self, Scalar p=2) -> Tensor | |
method_prefix_derived: '' | |
@@ -28400,6 +29047,7 @@ | |
operator_name: _sparse_sum | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_sum(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -28429,6 +29077,7 @@ | |
operator_name: _sparse_sum | |
overload_name: dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor | |
method_prefix_derived: '' | |
@@ -28464,6 +29113,7 @@ | |
operator_name: _sparse_sum | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -28499,6 +29149,7 @@ | |
operator_name: _sparse_sum | |
overload_name: dim_dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor | |
method_prefix_derived: '' | |
@@ -28540,6 +29191,7 @@ | |
operator_name: _sparse_sum_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -28579,6 +29231,7 @@ | |
operator_name: norm | |
overload_name: ScalarOpt_dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor | |
method_prefix_derived: '' | |
@@ -28620,6 +29273,7 @@ | |
operator_name: norm | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor | |
method_prefix_derived: '' | |
@@ -28656,6 +29310,7 @@ | |
operator_name: norm | |
overload_name: ScalarOpt_dim_dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor | |
method_prefix_derived: '' | |
@@ -28708,6 +29363,7 @@ | |
operator_name: norm | |
overload_name: ScalarOpt_dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -28755,6 +29411,7 @@ | |
operator_name: norm | |
overload_name: dtype_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -28814,6 +29471,7 @@ | |
operator_name: norm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -28868,6 +29526,7 @@ | |
operator_name: norm | |
overload_name: names_ScalarOpt_dim_dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor | |
method_prefix_derived: '' | |
@@ -28920,6 +29579,7 @@ | |
operator_name: norm | |
overload_name: names_ScalarOpt_dim | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -28967,6 +29627,7 @@ | |
operator_name: norm | |
overload_name: names_dtype_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29026,6 +29687,7 @@ | |
operator_name: norm | |
overload_name: names_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29080,6 +29742,7 @@ | |
operator_name: frobenius_norm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::frobenius_norm(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -29109,6 +29772,7 @@ | |
operator_name: frobenius_norm | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -29150,6 +29814,7 @@ | |
operator_name: frobenius_norm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29199,6 +29864,7 @@ | |
operator_name: nuclear_norm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -29234,6 +29900,7 @@ | |
operator_name: nuclear_norm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29277,6 +29944,7 @@ | |
operator_name: nuclear_norm | |
overload_name: dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -29318,6 +29986,7 @@ | |
operator_name: nuclear_norm | |
overload_name: dim_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29367,6 +30036,7 @@ | |
operator_name: clone | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::clone(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -29397,6 +30067,7 @@ | |
operator_name: resize_as_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::resize_as_(Tensor(a!) self, Tensor the_template) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29432,6 +30103,7 @@ | |
operator_name: pow | |
overload_name: Tensor_Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29474,6 +30146,7 @@ | |
operator_name: pow | |
overload_name: Tensor_Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor | |
method_prefix_derived: '' | |
@@ -29509,6 +30182,7 @@ | |
operator_name: zero_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::zero_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29539,6 +30213,7 @@ | |
operator_name: sub | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29588,6 +30263,7 @@ | |
operator_name: sub | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -29630,6 +30306,7 @@ | |
operator_name: sub_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29671,6 +30348,7 @@ | |
operator_name: sub | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -29712,6 +30390,7 @@ | |
operator_name: sub_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29752,6 +30431,7 @@ | |
operator_name: rsub | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -29793,6 +30473,7 @@ | |
operator_name: rsub | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -29833,6 +30514,7 @@ | |
operator_name: s_native_addmm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::s_native_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -29894,6 +30576,7 @@ | |
operator_name: s_native_addmm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::s_native_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -29947,6 +30630,7 @@ | |
operator_name: s_native_addmm_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::s_native_addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -30000,6 +30684,7 @@ | |
operator_name: _sparse_addmm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -30053,6 +30738,7 @@ | |
operator_name: addmm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -30114,6 +30800,7 @@ | |
operator_name: addmm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -30168,6 +30855,7 @@ | |
operator_name: addmm_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -30221,6 +30909,7 @@ | |
operator_name: sparse_coo_tensor | |
overload_name: size | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sparse_coo_tensor.size(int[] size, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -30256,6 +30945,7 @@ | |
operator_name: sparse_coo_tensor | |
overload_name: indices | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -30297,6 +30987,7 @@ | |
operator_name: sparse_coo_tensor | |
overload_name: indices_size | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -30343,6 +31034,7 @@ | |
operator_name: _sparse_coo_tensor_unsafe | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -30389,6 +31081,7 @@ | |
operator_name: _sparse_coo_tensor_with_dims | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -30434,6 +31127,7 @@ | |
operator_name: _sparse_coo_tensor_with_dims_and_tensors | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -30489,6 +31183,7 @@ | |
operator_name: sparse_resize_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -30533,6 +31228,7 @@ | |
operator_name: sparse_resize_and_clear_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -30577,6 +31273,7 @@ | |
operator_name: sparse_mask | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sparse_mask(Tensor self, Tensor mask) -> Tensor | |
method_prefix_derived: '' | |
@@ -30611,6 +31308,7 @@ | |
operator_name: to_dense | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to_dense(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -30640,6 +31338,7 @@ | |
operator_name: to_dense_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor | |
method_prefix_derived: '' | |
@@ -30674,6 +31373,7 @@ | |
operator_name: sparse_dim | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sparse_dim(Tensor self) -> int | |
method_prefix_derived: '' | |
@@ -30703,6 +31403,7 @@ | |
operator_name: _dimI | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_dimI(Tensor self) -> int | |
method_prefix_derived: '' | |
@@ -30732,6 +31433,7 @@ | |
operator_name: dense_dim | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::dense_dim(Tensor self) -> int | |
method_prefix_derived: '' | |
@@ -30761,6 +31463,7 @@ | |
operator_name: _dimV | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_dimV(Tensor self) -> int | |
method_prefix_derived: '' | |
@@ -30790,6 +31493,7 @@ | |
operator_name: _nnz | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_nnz(Tensor self) -> int | |
method_prefix_derived: '' | |
@@ -30819,6 +31523,7 @@ | |
operator_name: coalesce | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::coalesce(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -30848,6 +31553,7 @@ | |
operator_name: is_coalesced | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::is_coalesced(Tensor self) -> bool | |
method_prefix_derived: '' | |
@@ -30877,6 +31583,7 @@ | |
operator_name: _indices | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_indices(Tensor(a) self) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -30906,6 +31613,7 @@ | |
operator_name: _values | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_values(Tensor(a) self) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -30935,6 +31643,7 @@ | |
operator_name: _coalesced_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -30969,6 +31678,7 @@ | |
operator_name: indices | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::indices(Tensor(a) self) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -30998,6 +31708,7 @@ | |
operator_name: values | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::values(Tensor(a) self) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -31027,6 +31738,7 @@ | |
operator_name: hspmm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -31069,6 +31781,7 @@ | |
operator_name: hspmm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor | |
method_prefix_derived: '' | |
@@ -31103,6 +31816,7 @@ | |
operator_name: copy_sparse_to_sparse_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -31143,6 +31857,7 @@ | |
operator_name: numel | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::numel(Tensor self) -> int | |
method_prefix_derived: '' | |
@@ -31173,6 +31888,7 @@ | |
operator_name: unbind | |
overload_name: int | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unbind.int(Tensor(a) self, int dim=0) -> Tensor(a)[] | |
method_prefix_derived: '' | |
@@ -31209,6 +31925,7 @@ | |
operator_name: unbind | |
overload_name: Dimname | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unbind.Dimname(Tensor(a) self, Dimname dim) -> Tensor(a)[] | |
method_prefix_derived: '' | |
@@ -31244,6 +31961,7 @@ | |
operator_name: to_sparse | |
overload_name: sparse_dim | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -31278,6 +31996,7 @@ | |
operator_name: to_sparse | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to_sparse(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -31307,6 +32026,7 @@ | |
operator_name: to_mkldnn | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to_mkldnn(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -31336,6 +32056,7 @@ | |
operator_name: mkldnn_reorder_conv2d_weight | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -31392,6 +32113,7 @@ | |
operator_name: to_mkldnn_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor | |
method_prefix_derived: '' | |
@@ -31426,6 +32148,7 @@ | |
operator_name: quantize_linear | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantize_linear(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor | |
method_prefix_derived: '' | |
@@ -31470,6 +32193,7 @@ | |
operator_name: quantize_linear_per_channel | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantize_linear_per_channel(Tensor self, Tensor scales, Tensor zero_points, int[] axis, ScalarType dtype) -> Tensor | |
method_prefix_derived: '' | |
@@ -31519,6 +32243,7 @@ | |
operator_name: dequantize | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::dequantize(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -31549,6 +32274,7 @@ | |
operator_name: _dequantize_linear | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_dequantize_linear(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor | |
method_prefix_derived: '' | |
@@ -31593,6 +32319,7 @@ | |
operator_name: q_scale | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::q_scale(Tensor self) -> float | |
method_prefix_derived: '' | |
@@ -31623,6 +32350,7 @@ | |
operator_name: q_zero_point | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::q_zero_point(Tensor self) -> int | |
method_prefix_derived: '' | |
@@ -31653,6 +32381,7 @@ | |
operator_name: q_per_channel_scales | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::q_per_channel_scales(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -31683,6 +32412,7 @@ | |
operator_name: q_per_channel_zero_points | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::q_per_channel_zero_points(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -31713,6 +32443,7 @@ | |
operator_name: q_per_channel_axis | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::q_per_channel_axis(Tensor self) -> int[] | |
method_prefix_derived: '' | |
@@ -31743,6 +32474,7 @@ | |
operator_name: int_repr | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::int_repr(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -31773,6 +32505,7 @@ | |
operator_name: _per_tensor_affine_qtensor | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_per_tensor_affine_qtensor(Tensor self, float scale, int zero_point) -> Tensor | |
method_prefix_derived: '' | |
@@ -31812,6 +32545,7 @@ | |
operator_name: _per_channel_affine_qtensor | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_per_channel_affine_qtensor(Tensor self, Tensor scale, Tensor zero_point, int[] axis) -> Tensor | |
method_prefix_derived: '' | |
@@ -31856,6 +32590,7 @@ | |
operator_name: qscheme | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::qscheme(Tensor self) -> QScheme | |
method_prefix_derived: '' | |
@@ -31885,6 +32620,7 @@ | |
operator_name: fake_quantize_per_tensor_affine | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor | |
method_prefix_derived: '' | |
@@ -31934,6 +32670,7 @@ | |
operator_name: fake_quantize_per_tensor_affine_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fake_quantize_per_tensor_affine_backward(Tensor grad, Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor | |
method_prefix_derived: '' | |
@@ -31988,6 +32725,7 @@ | |
operator_name: to | |
overload_name: dtype_layout | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to.dtype_layout(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False, bool non_blocking=False, bool copy=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -32037,6 +32775,7 @@ | |
operator_name: to | |
overload_name: device | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to.device(Tensor self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -32088,6 +32827,7 @@ | |
operator_name: to | |
overload_name: dtype | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to.dtype(Tensor self, ScalarType dtype, bool non_blocking=False, bool copy=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -32134,6 +32874,7 @@ | |
operator_name: to | |
overload_name: other | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -32180,6 +32921,7 @@ | |
operator_name: meshgrid | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::meshgrid(Tensor[] tensors) -> Tensor[] | |
method_prefix_derived: '' | |
@@ -32209,6 +32951,7 @@ | |
operator_name: cartesian_prod | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cartesian_prod(Tensor[] tensors) -> Tensor | |
method_prefix_derived: '' | |
@@ -32238,6 +32981,7 @@ | |
operator_name: combinations | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -32279,6 +33023,7 @@ | |
operator_name: item | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::item(Tensor self) -> Scalar | |
method_prefix_derived: '' | |
@@ -32308,6 +33053,7 @@ | |
operator_name: _local_scalar_dense | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_local_scalar_dense(Tensor self) -> Scalar | |
method_prefix_derived: '' | |
@@ -32337,6 +33083,7 @@ | |
operator_name: _thnn_fused_lstm_cell | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -32394,6 +33141,7 @@ | |
operator_name: _thnn_fused_lstm_cell_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -32460,6 +33208,7 @@ | |
operator_name: _thnn_fused_gru_cell | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -32514,6 +33263,7 @@ | |
operator_name: _thnn_fused_gru_cell_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -32565,6 +33315,7 @@ | |
operator_name: lstm | |
overload_name: input | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -32640,6 +33391,7 @@ | |
operator_name: lstm | |
overload_name: data | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -32715,6 +33467,7 @@ | |
operator_name: gru | |
overload_name: input | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -32787,6 +33540,7 @@ | |
operator_name: gru | |
overload_name: data | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -32859,6 +33613,7 @@ | |
operator_name: rnn_tanh | |
overload_name: input | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -32931,6 +33686,7 @@ | |
operator_name: rnn_tanh | |
overload_name: data | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -33003,6 +33759,7 @@ | |
operator_name: rnn_relu | |
overload_name: input | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -33075,6 +33832,7 @@ | |
operator_name: rnn_relu | |
overload_name: data | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -33147,6 +33905,7 @@ | |
operator_name: lstm_cell | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -33206,6 +33965,7 @@ | |
operator_name: gru_cell | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -33262,6 +34022,7 @@ | |
operator_name: rnn_tanh_cell | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -33318,6 +34079,7 @@ | |
operator_name: rnn_relu_cell | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -33374,6 +34136,7 @@ | |
operator_name: quantized_lstm | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -33463,6 +34226,7 @@ | |
operator_name: quantized_gru | |
overload_name: input | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantized_gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -33535,6 +34299,7 @@ | |
operator_name: quantized_gru | |
overload_name: data | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -33607,6 +34372,7 @@ | |
operator_name: quantized_lstm_cell | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -33704,6 +34470,7 @@ | |
operator_name: quantized_gru_cell | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor | |
method_prefix_derived: '' | |
@@ -33798,6 +34565,7 @@ | |
operator_name: quantized_rnn_relu_cell | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor | |
method_prefix_derived: '' | |
@@ -33892,6 +34660,7 @@ | |
operator_name: quantized_rnn_tanh_cell | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor | |
method_prefix_derived: '' | |
@@ -33986,6 +34755,7 @@ | |
operator_name: _pack_padded_sequence | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -34028,6 +34798,7 @@ | |
operator_name: _pack_padded_sequence_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor | |
method_prefix_derived: '' | |
@@ -34072,6 +34843,7 @@ | |
operator_name: _pad_packed_sequence | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -34124,6 +34896,7 @@ | |
operator_name: set_ | |
overload_name: source_Storage | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34158,6 +34931,7 @@ | |
operator_name: set_ | |
overload_name: source_Storage_storage_offset | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34208,6 +34982,7 @@ | |
operator_name: set_ | |
overload_name: source_Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34242,6 +35017,7 @@ | |
operator_name: set_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::set_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34271,6 +35047,7 @@ | |
operator_name: set_quantizer_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::set_quantizer_(Tensor(a!) self, ConstQuantizerPtr quantizer) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34305,6 +35082,7 @@ | |
operator_name: is_set_to | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::is_set_to(Tensor self, Tensor tensor) -> bool | |
method_prefix_derived: '' | |
@@ -34339,6 +35117,7 @@ | |
operator_name: masked_fill_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34378,6 +35157,7 @@ | |
operator_name: masked_fill | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor | |
method_prefix_derived: '' | |
@@ -34418,6 +35198,7 @@ | |
operator_name: masked_fill_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34457,6 +35238,7 @@ | |
operator_name: masked_fill | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor | |
method_prefix_derived: '' | |
@@ -34497,6 +35279,7 @@ | |
operator_name: masked_scatter_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34536,6 +35319,7 @@ | |
operator_name: masked_scatter | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor | |
method_prefix_derived: '' | |
@@ -34576,6 +35360,7 @@ | |
operator_name: view | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::view(Tensor(a) self, int[] size) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -34610,6 +35395,7 @@ | |
operator_name: put_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34655,6 +35441,7 @@ | |
operator_name: index_add_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34699,6 +35486,7 @@ | |
operator_name: index_add | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_add(Tensor self, int dim, Tensor index, Tensor source) -> Tensor | |
method_prefix_derived: '' | |
@@ -34744,6 +35532,7 @@ | |
operator_name: index_fill_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_fill_.Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34788,6 +35577,7 @@ | |
operator_name: index_fill | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_fill.Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor | |
method_prefix_derived: '' | |
@@ -34833,6 +35623,7 @@ | |
operator_name: index_fill_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_fill_.Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34877,6 +35668,7 @@ | |
operator_name: index_fill | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_fill.Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor | |
method_prefix_derived: '' | |
@@ -34922,6 +35714,7 @@ | |
operator_name: scatter_ | |
overload_name: src | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -34966,6 +35759,7 @@ | |
operator_name: scatter | |
overload_name: src | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor | |
method_prefix_derived: '' | |
@@ -35011,6 +35805,7 @@ | |
operator_name: scatter_ | |
overload_name: value | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35055,6 +35850,7 @@ | |
operator_name: scatter | |
overload_name: value | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor | |
method_prefix_derived: '' | |
@@ -35100,6 +35896,7 @@ | |
operator_name: scatter_add_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35144,6 +35941,7 @@ | |
operator_name: scatter_add | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor | |
method_prefix_derived: '' | |
@@ -35189,6 +35987,7 @@ | |
operator_name: lt_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35223,6 +36022,7 @@ | |
operator_name: lt_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35257,6 +36057,7 @@ | |
operator_name: gt_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35291,6 +36092,7 @@ | |
operator_name: gt_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35325,6 +36127,7 @@ | |
operator_name: le_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35359,6 +36162,7 @@ | |
operator_name: le_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35393,6 +36197,7 @@ | |
operator_name: ge_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35427,6 +36232,7 @@ | |
operator_name: ge_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35461,6 +36267,7 @@ | |
operator_name: eq_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35495,6 +36302,7 @@ | |
operator_name: eq_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35529,6 +36337,7 @@ | |
operator_name: ne_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35563,6 +36372,7 @@ | |
operator_name: ne_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35597,6 +36407,7 @@ | |
operator_name: __and__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -35632,6 +36443,7 @@ | |
operator_name: __and__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -35667,6 +36479,7 @@ | |
operator_name: __iand__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35701,6 +36514,7 @@ | |
operator_name: __iand__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35735,6 +36549,7 @@ | |
operator_name: __or__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -35770,6 +36585,7 @@ | |
operator_name: __or__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -35805,6 +36621,7 @@ | |
operator_name: __ior__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35839,6 +36656,7 @@ | |
operator_name: __ior__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35873,6 +36691,7 @@ | |
operator_name: __xor__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -35908,6 +36727,7 @@ | |
operator_name: __xor__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -35943,6 +36763,7 @@ | |
operator_name: __ixor__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -35977,6 +36798,7 @@ | |
operator_name: __ixor__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36011,6 +36833,7 @@ | |
operator_name: __lshift__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -36046,6 +36869,7 @@ | |
operator_name: __lshift__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -36081,6 +36905,7 @@ | |
operator_name: __ilshift__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36115,6 +36940,7 @@ | |
operator_name: __ilshift__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36149,6 +36975,7 @@ | |
operator_name: __rshift__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -36184,6 +37011,7 @@ | |
operator_name: __rshift__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -36219,6 +37047,7 @@ | |
operator_name: __irshift__ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36253,6 +37082,7 @@ | |
operator_name: __irshift__ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36287,6 +37117,7 @@ | |
operator_name: lgamma_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lgamma_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36316,6 +37147,7 @@ | |
operator_name: atan2_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36350,6 +37182,7 @@ | |
operator_name: tril_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36385,6 +37218,7 @@ | |
operator_name: triu_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36420,6 +37254,7 @@ | |
operator_name: digamma_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::digamma_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36449,6 +37284,7 @@ | |
operator_name: polygamma_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36483,6 +37319,7 @@ | |
operator_name: renorm_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36527,6 +37364,7 @@ | |
operator_name: pow_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36561,6 +37399,7 @@ | |
operator_name: pow_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36595,6 +37434,7 @@ | |
operator_name: lerp_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36634,6 +37474,7 @@ | |
operator_name: lerp_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36673,6 +37514,7 @@ | |
operator_name: fmod_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36707,6 +37549,7 @@ | |
operator_name: fmod_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36741,6 +37584,7 @@ | |
operator_name: remainder_ | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36775,6 +37619,7 @@ | |
operator_name: remainder_ | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36809,6 +37654,7 @@ | |
operator_name: addbmm_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36862,6 +37708,7 @@ | |
operator_name: addbmm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -36923,6 +37770,7 @@ | |
operator_name: addbmm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -36977,6 +37825,7 @@ | |
operator_name: addcdiv_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37023,6 +37872,7 @@ | |
operator_name: random_ | |
overload_name: from | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::random_.from(Tensor(a!) self, int from, int to, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37069,6 +37919,7 @@ | |
operator_name: random_ | |
overload_name: to | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37110,6 +37961,7 @@ | |
operator_name: random_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37146,6 +37998,7 @@ | |
operator_name: uniform_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37194,6 +38047,7 @@ | |
operator_name: normal_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37242,6 +38096,7 @@ | |
operator_name: cauchy_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37290,6 +38145,7 @@ | |
operator_name: log_normal_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37338,6 +38194,7 @@ | |
operator_name: exponential_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37380,6 +38237,7 @@ | |
operator_name: geometric_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37421,6 +38279,7 @@ | |
operator_name: diag | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37464,6 +38323,7 @@ | |
operator_name: diag | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::diag(Tensor self, int diagonal=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -37500,6 +38360,7 @@ | |
operator_name: cross | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37548,6 +38409,7 @@ | |
operator_name: cross | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -37589,6 +38451,7 @@ | |
operator_name: triu | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37632,6 +38495,7 @@ | |
operator_name: triu | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::triu(Tensor self, int diagonal=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -37668,6 +38532,7 @@ | |
operator_name: tril | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37711,6 +38576,7 @@ | |
operator_name: tril | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tril(Tensor self, int diagonal=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -37747,6 +38613,7 @@ | |
operator_name: tril_indices | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -37794,6 +38661,7 @@ | |
operator_name: triu_indices | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -37841,6 +38709,7 @@ | |
operator_name: trace | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::trace(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -37871,6 +38740,7 @@ | |
operator_name: ne | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37913,6 +38783,7 @@ | |
operator_name: ne | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ne.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -37948,6 +38819,7 @@ | |
operator_name: ne | |
overload_name: Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -37990,6 +38862,7 @@ | |
operator_name: ne | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ne.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38025,6 +38898,7 @@ | |
operator_name: eq | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38067,6 +38941,7 @@ | |
operator_name: eq | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eq.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38102,6 +38977,7 @@ | |
operator_name: eq | |
overload_name: Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38144,6 +39020,7 @@ | |
operator_name: eq | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eq.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38179,6 +39056,7 @@ | |
operator_name: ge | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38221,6 +39099,7 @@ | |
operator_name: ge | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ge.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38256,6 +39135,7 @@ | |
operator_name: ge | |
overload_name: Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38298,6 +39178,7 @@ | |
operator_name: ge | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ge.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38333,6 +39214,7 @@ | |
operator_name: le | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38375,6 +39257,7 @@ | |
operator_name: le | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::le.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38410,6 +39293,7 @@ | |
operator_name: le | |
overload_name: Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38452,6 +39336,7 @@ | |
operator_name: le | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::le.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38487,6 +39372,7 @@ | |
operator_name: gt | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38529,6 +39415,7 @@ | |
operator_name: gt | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gt.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38564,6 +39451,7 @@ | |
operator_name: gt | |
overload_name: Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38606,6 +39494,7 @@ | |
operator_name: gt | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gt.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38641,6 +39530,7 @@ | |
operator_name: lt | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38683,6 +39573,7 @@ | |
operator_name: lt | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lt.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38718,6 +39609,7 @@ | |
operator_name: lt | |
overload_name: Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38760,6 +39652,7 @@ | |
operator_name: lt | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lt.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -38795,6 +39688,7 @@ | |
operator_name: take | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38837,6 +39731,7 @@ | |
operator_name: take | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::take(Tensor self, Tensor index) -> Tensor | |
method_prefix_derived: '' | |
@@ -38872,6 +39767,7 @@ | |
operator_name: index_select | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -38919,6 +39815,7 @@ | |
operator_name: index_select | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::index_select(Tensor self, int dim, Tensor index) -> Tensor | |
method_prefix_derived: '' | |
@@ -38959,6 +39856,7 @@ | |
operator_name: masked_select | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -39001,6 +39899,7 @@ | |
operator_name: masked_select | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::masked_select(Tensor self, Tensor mask) -> Tensor | |
method_prefix_derived: '' | |
@@ -39036,6 +39935,7 @@ | |
operator_name: nonzero | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -39073,6 +39973,7 @@ | |
operator_name: nonzero | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nonzero(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -39103,6 +40004,7 @@ | |
operator_name: nonzero_numpy | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nonzero_numpy(Tensor self) -> Tensor[] | |
method_prefix_derived: '' | |
@@ -39133,6 +40035,7 @@ | |
operator_name: gather | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -39187,6 +40090,7 @@ | |
operator_name: gather | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -39234,6 +40138,7 @@ | |
operator_name: _gather_sparse_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor | |
method_prefix_derived: '' | |
@@ -39278,6 +40183,7 @@ | |
operator_name: addcmul | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -39332,6 +40238,7 @@ | |
operator_name: addcmul | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -39379,6 +40286,7 @@ | |
operator_name: addcmul_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -39425,6 +40333,7 @@ | |
operator_name: addcdiv | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -39479,6 +40388,7 @@ | |
operator_name: addcdiv | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -39526,6 +40436,7 @@ | |
operator_name: lstsq | |
overload_name: X | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR) | |
method_prefix_derived: '' | |
@@ -39583,6 +40494,7 @@ | |
operator_name: lstsq | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR) | |
method_prefix_derived: '' | |
@@ -39623,6 +40535,7 @@ | |
operator_name: triangular_solve | |
overload_name: X | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient) | |
method_prefix_derived: '' | |
@@ -39698,6 +40611,7 @@ | |
operator_name: triangular_solve | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) | |
method_prefix_derived: '' | |
@@ -39756,6 +40670,7 @@ | |
operator_name: _triangular_solve_helper | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_triangular_solve_helper(Tensor self, Tensor A, bool upper, bool transpose, bool unitriangular) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -39808,6 +40723,7 @@ | |
operator_name: symeig | |
overload_name: e | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) | |
method_prefix_derived: '' | |
@@ -39872,6 +40788,7 @@ | |
operator_name: symeig | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors) | |
method_prefix_derived: '' | |
@@ -39919,6 +40836,7 @@ | |
operator_name: _symeig_helper | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -39961,6 +40879,7 @@ | |
operator_name: eig | |
overload_name: e | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) | |
method_prefix_derived: '' | |
@@ -40019,6 +40938,7 @@ | |
operator_name: eig | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors) | |
method_prefix_derived: '' | |
@@ -40060,6 +40980,7 @@ | |
operator_name: svd | |
overload_name: U | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) | |
method_prefix_derived: '' | |
@@ -40137,6 +41058,7 @@ | |
operator_name: svd | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) | |
method_prefix_derived: '' | |
@@ -40188,6 +41110,7 @@ | |
operator_name: _svd_helper | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_svd_helper(Tensor self, bool some, bool compute_uv) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -40233,6 +41156,7 @@ | |
operator_name: cholesky | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -40276,6 +41200,7 @@ | |
operator_name: cholesky | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cholesky(Tensor self, bool upper=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -40312,6 +41237,7 @@ | |
operator_name: _cholesky_helper | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cholesky_helper(Tensor self, bool upper) -> Tensor | |
method_prefix_derived: '' | |
@@ -40346,6 +41272,7 @@ | |
operator_name: cholesky_solve | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -40394,6 +41321,7 @@ | |
operator_name: cholesky_solve | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -40435,6 +41363,7 @@ | |
operator_name: _cholesky_solve_helper | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor | |
method_prefix_derived: '' | |
@@ -40474,6 +41403,7 @@ | |
operator_name: solve | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU) | |
method_prefix_derived: '' | |
@@ -40514,6 +41444,7 @@ | |
operator_name: solve | |
overload_name: solution | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU) | |
method_prefix_derived: '' | |
@@ -40571,6 +41502,7 @@ | |
operator_name: _solve_helper | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_solve_helper(Tensor self, Tensor A) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -40608,6 +41540,7 @@ | |
operator_name: cholesky_inverse | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -40651,6 +41584,7 @@ | |
operator_name: cholesky_inverse | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -40687,6 +41621,7 @@ | |
operator_name: qr | |
overload_name: Q | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) | |
method_prefix_derived: '' | |
@@ -40745,6 +41680,7 @@ | |
operator_name: qr | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) | |
method_prefix_derived: '' | |
@@ -40786,6 +41722,7 @@ | |
operator_name: _qr_helper | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_qr_helper(Tensor self, bool some) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -40823,6 +41760,7 @@ | |
operator_name: geqrf | |
overload_name: a | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) | |
method_prefix_derived: '' | |
@@ -40875,6 +41813,7 @@ | |
operator_name: geqrf | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) | |
method_prefix_derived: '' | |
@@ -40910,6 +41849,7 @@ | |
operator_name: orgqr | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -40952,6 +41892,7 @@ | |
operator_name: orgqr | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::orgqr(Tensor self, Tensor input2) -> Tensor | |
method_prefix_derived: '' | |
@@ -40987,6 +41928,7 @@ | |
operator_name: ormqr | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41046,6 +41988,7 @@ | |
operator_name: ormqr | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -41098,6 +42041,7 @@ | |
operator_name: _lu_with_info | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor, Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -41145,6 +42089,7 @@ | |
operator_name: lu_solve | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41192,6 +42137,7 @@ | |
operator_name: lu_solve | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor | |
method_prefix_derived: '' | |
@@ -41232,6 +42178,7 @@ | |
operator_name: _lu_solve_helper | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_lu_solve_helper(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor | |
method_prefix_derived: '' | |
@@ -41271,6 +42218,7 @@ | |
operator_name: multinomial | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41326,6 +42274,7 @@ | |
operator_name: multinomial | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -41374,6 +42323,7 @@ | |
operator_name: _multinomial_alias_setup | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_multinomial_alias_setup(Tensor probs) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -41406,6 +42356,7 @@ | |
operator_name: _multinomial_alias_draw | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_multinomial_alias_draw(Tensor J, Tensor q, int num_samples, *, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -41452,6 +42403,7 @@ | |
operator_name: lgamma | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41489,6 +42441,7 @@ | |
operator_name: lgamma | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lgamma(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -41519,6 +42472,7 @@ | |
operator_name: digamma | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41556,6 +42510,7 @@ | |
operator_name: digamma | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::digamma(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -41586,6 +42541,7 @@ | |
operator_name: polygamma | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41628,6 +42584,7 @@ | |
operator_name: polygamma | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::polygamma(int n, Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -41663,6 +42620,7 @@ | |
operator_name: erfinv | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::erfinv(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -41693,6 +42651,7 @@ | |
operator_name: erfinv_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::erfinv_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41722,6 +42681,7 @@ | |
operator_name: erfinv | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41759,6 +42719,7 @@ | |
operator_name: sign | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sign(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -41789,6 +42750,7 @@ | |
operator_name: sign_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sign_(Tensor(a!) self) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41818,6 +42780,7 @@ | |
operator_name: sign | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41855,6 +42818,7 @@ | |
operator_name: dist | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor | |
method_prefix_derived: '' | |
@@ -41896,6 +42860,7 @@ | |
operator_name: atan2 | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -41938,6 +42903,7 @@ | |
operator_name: atan2 | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::atan2(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -41973,6 +42939,7 @@ | |
operator_name: lerp | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -42020,6 +42987,7 @@ | |
operator_name: lerp | |
overload_name: Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -42067,6 +43035,7 @@ | |
operator_name: lerp | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor | |
method_prefix_derived: '' | |
@@ -42107,6 +43076,7 @@ | |
operator_name: lerp | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor | |
method_prefix_derived: '' | |
@@ -42147,6 +43117,7 @@ | |
operator_name: histc | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -42202,6 +43173,7 @@ | |
operator_name: histc | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -42250,6 +43222,7 @@ | |
operator_name: fmod | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -42292,6 +43265,7 @@ | |
operator_name: fmod | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -42327,6 +43301,7 @@ | |
operator_name: fmod | |
overload_name: Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -42369,6 +43344,7 @@ | |
operator_name: fmod | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -42404,6 +43380,7 @@ | |
operator_name: remainder | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -42446,6 +43423,7 @@ | |
operator_name: remainder | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor | |
method_prefix_derived: '' | |
@@ -42481,6 +43459,7 @@ | |
operator_name: remainder | |
overload_name: Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -42523,6 +43502,7 @@ | |
operator_name: remainder | |
overload_name: Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -42558,6 +43538,7 @@ | |
operator_name: min | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -42600,6 +43581,7 @@ | |
operator_name: min | |
overload_name: other | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::min.other(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -42635,6 +43617,7 @@ | |
operator_name: min | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::min(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -42665,6 +43648,7 @@ | |
operator_name: max | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -42707,6 +43691,7 @@ | |
operator_name: max | |
overload_name: other | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max.other(Tensor self, Tensor other) -> Tensor | |
method_prefix_derived: '' | |
@@ -42742,6 +43727,7 @@ | |
operator_name: max | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -42772,6 +43758,7 @@ | |
operator_name: median | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::median(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -42802,6 +43789,7 @@ | |
operator_name: sort | |
overload_name: values | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) | |
method_prefix_derived: '' | |
@@ -42866,6 +43854,7 @@ | |
operator_name: sort | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -42913,6 +43902,7 @@ | |
operator_name: argsort | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor | |
method_prefix_derived: '' | |
@@ -42955,6 +43945,7 @@ | |
operator_name: topk | |
overload_name: values | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) | 
method_prefix_derived: '' | |
@@ -43030,6 +44021,7 @@ | |
operator_name: topk | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) | |
method_prefix_derived: '' | |
@@ -43088,6 +44080,7 @@ | |
operator_name: all | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::all(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -43118,6 +44111,7 @@ | |
operator_name: any | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::any(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -43148,6 +44142,7 @@ | |
operator_name: renorm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -43200,6 +44195,7 @@ | |
operator_name: renorm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor | |
method_prefix_derived: '' | |
@@ -43245,6 +44241,7 @@ | |
operator_name: unfold | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -43289,6 +44286,7 @@ | |
operator_name: equal | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::equal(Tensor self, Tensor other) -> bool | |
method_prefix_derived: '' | |
@@ -43324,6 +44322,7 @@ | |
operator_name: pow | |
overload_name: Tensor_Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -43366,6 +44365,7 @@ | |
operator_name: pow | |
overload_name: Tensor_Tensor | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor | |
method_prefix_derived: '' | |
@@ -43401,6 +44401,7 @@ | |
operator_name: pow | |
overload_name: Scalar_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -43443,6 +44444,7 @@ | |
operator_name: pow | |
overload_name: Scalar | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor | |
method_prefix_derived: '' | |
@@ -43477,6 +44479,7 @@ | |
operator_name: normal | |
overload_name: Tensor_float_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -43527,6 +44530,7 @@ | |
operator_name: normal | |
overload_name: Tensor_float | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -43569,6 +44573,7 @@ | |
operator_name: normal | |
overload_name: float_Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -43618,6 +44623,7 @@ | |
operator_name: normal | |
overload_name: float_Tensor | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -43659,6 +44665,7 @@ | |
operator_name: normal | |
overload_name: Tensor_Tensor_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -43708,6 +44715,7 @@ | |
operator_name: normal | |
overload_name: Tensor_Tensor | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -43749,6 +44757,7 @@ | |
operator_name: normal | |
overload_name: float_float | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -43802,6 +44811,7 @@ | |
operator_name: normal | |
overload_name: float_float_out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -43856,6 +44866,7 @@ | |
operator_name: alias | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::alias(Tensor(a) self) -> Tensor(a) | |
method_prefix_derived: '' | |
@@ -43886,6 +44897,7 @@ | |
operator_name: _addr | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -43939,6 +44951,7 @@ | |
operator_name: _addr_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -43992,6 +45005,7 @@ | |
operator_name: _addr | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -44053,6 +45067,7 @@ | |
operator_name: _index_copy_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -44097,6 +45112,7 @@ | |
operator_name: _cumsum | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cumsum(Tensor self, int dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -44131,6 +45147,7 @@ | |
operator_name: _cumsum | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cumsum.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -44173,6 +45190,7 @@ | |
operator_name: _cumprod | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cumprod(Tensor self, int dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -44207,6 +45225,7 @@ | |
operator_name: _cumprod | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cumprod.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -44249,6 +45268,7 @@ | |
operator_name: _var | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_var(Tensor self, bool unbiased=True) -> Tensor | |
method_prefix_derived: '' | |
@@ -44284,6 +45304,7 @@ | |
operator_name: _std | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_std(Tensor self, bool unbiased=True) -> Tensor | |
method_prefix_derived: '' | |
@@ -44319,6 +45340,7 @@ | |
operator_name: _addmm | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -44380,6 +45402,7 @@ | |
operator_name: _addmm | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -44433,6 +45456,7 @@ | |
operator_name: _addmm_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -44486,6 +45510,7 @@ | |
operator_name: _cat | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cat(Tensor[] tensors, int dim=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -44521,6 +45546,7 @@ | |
operator_name: _cat | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -44564,6 +45590,7 @@ | |
operator_name: _mode | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -44608,6 +45635,7 @@ | |
operator_name: _mode | |
overload_name: values | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -44668,6 +45696,7 @@ | |
operator_name: _max | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_max(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -44711,6 +45740,7 @@ | |
operator_name: _max | |
overload_name: max | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_max.max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_indices) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -44770,6 +45800,7 @@ | |
operator_name: _min | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_min(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -44813,6 +45844,7 @@ | |
operator_name: _min | |
overload_name: min | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_min.min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -44872,6 +45904,7 @@ | |
operator_name: binary_cross_entropy | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -44926,6 +45959,7 @@ | |
operator_name: binary_cross_entropy | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -44972,6 +46006,7 @@ | |
operator_name: binary_cross_entropy_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -45031,6 +46066,7 @@ | |
operator_name: binary_cross_entropy_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -45082,6 +46118,7 @@ | |
operator_name: mse_loss | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -45130,6 +46167,7 @@ | |
operator_name: mse_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -45170,6 +46208,7 @@ | |
operator_name: mse_loss_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -45222,6 +46261,7 @@ | |
operator_name: mse_loss_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor | |
method_prefix_derived: '' | |
@@ -45266,6 +46306,7 @@ | |
operator_name: l1_loss | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -45314,6 +46355,7 @@ | |
operator_name: l1_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -45354,6 +46396,7 @@ | |
operator_name: l1_loss_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -45406,6 +46449,7 @@ | |
operator_name: l1_loss_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor | |
method_prefix_derived: '' | |
@@ -45450,6 +46494,7 @@ | |
operator_name: multi_margin_loss | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -45516,6 +46561,7 @@ | |
operator_name: multi_margin_loss | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -45574,6 +46620,7 @@ | |
operator_name: multi_margin_loss_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -45643,6 +46690,7 @@ | |
operator_name: multi_margin_loss_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -45704,6 +46752,7 @@ | |
operator_name: multilabel_margin_loss | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -45752,6 +46801,7 @@ | |
operator_name: multilabel_margin_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -45792,6 +46842,7 @@ | |
operator_name: multilabel_margin_loss_forward | |
overload_name: output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -45850,6 +46901,7 @@ | |
operator_name: multilabel_margin_loss_forward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target) | |
method_prefix_derived: '' | |
@@ -45894,6 +46946,7 @@ | |
operator_name: multilabel_margin_loss_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -45951,6 +47004,7 @@ | |
operator_name: multilabel_margin_loss_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor | |
method_prefix_derived: '' | |
@@ -46000,6 +47054,7 @@ | |
operator_name: nll_loss | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -46060,6 +47115,7 @@ | |
operator_name: nll_loss | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor | |
method_prefix_derived: '' | |
@@ -46112,6 +47168,7 @@ | |
operator_name: nll_loss_forward | |
overload_name: output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -46180,6 +47237,7 @@ | |
operator_name: nll_loss_forward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight) | |
method_prefix_derived: '' | |
@@ -46234,6 +47292,7 @@ | |
operator_name: nll_loss_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -46301,6 +47360,7 @@ | |
operator_name: nll_loss_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor | |
method_prefix_derived: '' | |
@@ -46360,6 +47420,7 @@ | |
operator_name: nll_loss2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -46420,6 +47481,7 @@ | |
operator_name: nll_loss2d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor | |
method_prefix_derived: '' | |
@@ -46472,6 +47534,7 @@ | |
operator_name: nll_loss2d_forward | |
overload_name: output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -46540,6 +47603,7 @@ | |
operator_name: nll_loss2d_forward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight) | |
method_prefix_derived: '' | |
@@ -46594,6 +47658,7 @@ | |
operator_name: nll_loss2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -46661,6 +47726,7 @@ | |
operator_name: nll_loss2d_backward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor | |
method_prefix_derived: '' | |
@@ -46720,6 +47786,7 @@ | |
operator_name: smooth_l1_loss | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -46768,6 +47835,7 @@ | |
operator_name: smooth_l1_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -46808,6 +47876,7 @@ | |
operator_name: smooth_l1_loss_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -46860,6 +47929,7 @@ | |
operator_name: smooth_l1_loss_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor | |
method_prefix_derived: '' | |
@@ -46904,6 +47974,7 @@ | |
operator_name: soft_margin_loss | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -46952,6 +48023,7 @@ | |
operator_name: soft_margin_loss | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor | |
method_prefix_derived: '' | |
@@ -46992,6 +48064,7 @@ | |
operator_name: soft_margin_loss_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47044,6 +48117,7 @@ | |
operator_name: soft_margin_loss_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor | |
method_prefix_derived: '' | |
@@ -47088,6 +48162,7 @@ | |
operator_name: elu | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47143,6 +48218,7 @@ | |
operator_name: elu | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -47190,6 +48266,7 @@ | |
operator_name: elu_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47247,6 +48324,7 @@ | |
operator_name: elu_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output) -> Tensor | |
method_prefix_derived: '' | |
@@ -47296,6 +48374,7 @@ | |
operator_name: elu_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47343,6 +48422,7 @@ | |
operator_name: glu | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47386,6 +48466,7 @@ | |
operator_name: glu | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::glu(Tensor self, int dim=-1) -> Tensor | |
method_prefix_derived: '' | |
@@ -47421,6 +48502,7 @@ | |
operator_name: glu_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47468,6 +48550,7 @@ | |
operator_name: glu_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor | |
method_prefix_derived: '' | |
@@ -47507,6 +48590,7 @@ | |
operator_name: hardtanh | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47556,6 +48640,7 @@ | |
operator_name: hardtanh | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -47597,6 +48682,7 @@ | |
operator_name: hardtanh_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47649,6 +48735,7 @@ | |
operator_name: hardtanh_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor | |
method_prefix_derived: '' | |
@@ -47693,6 +48780,7 @@ | |
operator_name: hardtanh_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47734,6 +48822,7 @@ | |
operator_name: leaky_relu | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47777,6 +48866,7 @@ | |
operator_name: leaky_relu | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor | |
method_prefix_derived: '' | |
@@ -47812,6 +48902,7 @@ | |
operator_name: leaky_relu_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47859,6 +48950,7 @@ | |
operator_name: leaky_relu_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope) -> Tensor | |
method_prefix_derived: '' | |
@@ -47898,6 +48990,7 @@ | |
operator_name: leaky_relu_ | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47933,6 +49026,7 @@ | |
operator_name: log_sigmoid | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -47970,6 +49064,7 @@ | |
operator_name: log_sigmoid | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_sigmoid(Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -47999,6 +49094,7 @@ | |
operator_name: log_sigmoid_forward | |
overload_name: output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -48047,6 +49143,7 @@ | |
operator_name: log_sigmoid_forward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) | |
method_prefix_derived: '' | |
@@ -48081,6 +49178,7 @@ | |
operator_name: log_sigmoid_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -48128,6 +49226,7 @@ | |
operator_name: log_sigmoid_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor | |
method_prefix_derived: '' | |
@@ -48167,6 +49266,7 @@ | |
operator_name: rrelu_with_noise | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -48233,6 +49333,7 @@ | |
operator_name: rrelu_with_noise | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -48291,6 +49392,7 @@ | |
operator_name: rrelu_with_noise_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rrelu_with_noise_backward.grad_input(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -48353,6 +49455,7 @@ | |
operator_name: rrelu_with_noise_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training) -> Tensor | |
method_prefix_derived: '' | |
@@ -48407,6 +49510,7 @@ | |
operator_name: rrelu_with_noise_ | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -48465,6 +49569,7 @@ | |
operator_name: softplus | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -48514,6 +49619,7 @@ | |
operator_name: softplus | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor | |
method_prefix_derived: '' | |
@@ -48555,6 +49661,7 @@ | |
operator_name: softplus_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -48612,6 +49719,7 @@ | |
operator_name: softplus_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor | |
method_prefix_derived: '' | |
@@ -48661,6 +49769,7 @@ | |
operator_name: softshrink | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -48704,6 +49813,7 @@ | |
operator_name: softshrink | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor | |
method_prefix_derived: '' | |
@@ -48739,6 +49849,7 @@ | |
operator_name: softshrink_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -48786,6 +49897,7 @@ | |
operator_name: softshrink_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor | |
method_prefix_derived: '' | |
@@ -48825,6 +49937,7 @@ | |
operator_name: adaptive_avg_pool2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -48868,6 +49981,7 @@ | |
operator_name: adaptive_avg_pool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -48903,6 +50017,7 @@ | |
operator_name: mkldnn_adaptive_avg_pool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -48938,6 +50053,7 @@ | |
operator_name: _adaptive_avg_pool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -48973,6 +50089,7 @@ | |
operator_name: _adaptive_avg_pool2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -49007,6 +50124,7 @@ | |
operator_name: adaptive_avg_pool3d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -49050,6 +50168,7 @@ | |
operator_name: adaptive_avg_pool3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -49085,6 +50204,7 @@ | |
operator_name: adaptive_avg_pool3d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -49127,6 +50247,7 @@ | |
operator_name: adaptive_avg_pool3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor | |
method_prefix_derived: '' | |
@@ -49161,6 +50282,7 @@ | |
operator_name: adaptive_max_pool2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -49215,6 +50337,7 @@ | |
operator_name: adaptive_max_pool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -49253,6 +50376,7 @@ | |
operator_name: adaptive_max_pool2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -49300,6 +50424,7 @@ | |
operator_name: adaptive_max_pool2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor | |
method_prefix_derived: '' | |
@@ -49339,6 +50464,7 @@ | |
operator_name: adaptive_max_pool3d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -49393,6 +50519,7 @@ | |
operator_name: adaptive_max_pool3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -49431,6 +50558,7 @@ | |
operator_name: adaptive_max_pool3d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -49478,6 +50606,7 @@ | |
operator_name: adaptive_max_pool3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor | |
method_prefix_derived: '' | |
@@ -49517,6 +50646,7 @@ | |
operator_name: avg_pool2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -49592,6 +50722,7 @@ | |
operator_name: avg_pool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -49659,6 +50790,7 @@ | |
operator_name: avg_pool2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -49734,6 +50866,7 @@ | |
operator_name: avg_pool2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor | |
method_prefix_derived: '' | |
@@ -49801,6 +50934,7 @@ | |
operator_name: avg_pool3d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -49876,6 +51010,7 @@ | |
operator_name: avg_pool3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor | |
method_prefix_derived: '' | |
@@ -49943,6 +51078,7 @@ | |
operator_name: avg_pool3d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -50018,6 +51154,7 @@ | |
operator_name: avg_pool3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor | |
method_prefix_derived: '' | |
@@ -50085,6 +51222,7 @@ | |
operator_name: fractional_max_pool2d | |
overload_name: output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -50150,6 +51288,7 @@ | |
operator_name: fractional_max_pool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -50199,6 +51338,7 @@ | |
operator_name: fractional_max_pool2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -50258,6 +51398,7 @@ | |
operator_name: fractional_max_pool2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor | |
method_prefix_derived: '' | |
@@ -50309,6 +51450,7 @@ | |
operator_name: fractional_max_pool3d | |
overload_name: output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -50374,6 +51516,7 @@ | |
operator_name: fractional_max_pool3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -50423,6 +51566,7 @@ | |
operator_name: fractional_max_pool3d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -50482,6 +51626,7 @@ | |
operator_name: fractional_max_pool3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor | |
method_prefix_derived: '' | |
@@ -50533,6 +51678,7 @@ | |
operator_name: max_pool2d_with_indices | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -50614,6 +51760,7 @@ | |
operator_name: max_pool2d_with_indices | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -50679,6 +51826,7 @@ | |
operator_name: max_pool2d_with_indices_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -50755,6 +51903,7 @@ | |
operator_name: max_pool2d_with_indices_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor | |
method_prefix_derived: '' | |
@@ -50823,6 +51972,7 @@ | |
operator_name: max_pool3d_with_indices | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -50904,6 +52054,7 @@ | |
operator_name: max_pool3d_with_indices | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) | |
method_prefix_derived: '' | |
@@ -50969,6 +52120,7 @@ | |
operator_name: max_pool3d_with_indices_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51045,6 +52197,7 @@ | |
operator_name: max_pool3d_with_indices_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor | |
method_prefix_derived: '' | |
@@ -51113,6 +52266,7 @@ | |
operator_name: max_unpool2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51161,6 +52315,7 @@ | |
operator_name: max_unpool2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -51201,6 +52356,7 @@ | |
operator_name: max_unpool2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_unpool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51254,6 +52410,7 @@ | |
operator_name: max_unpool2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -51299,6 +52456,7 @@ | |
operator_name: max_unpool3d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51359,6 +52517,7 @@ | |
operator_name: max_unpool3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -51411,6 +52570,7 @@ | |
operator_name: max_unpool3d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_unpool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51476,6 +52636,7 @@ | |
operator_name: max_unpool3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -51533,6 +52694,7 @@ | |
operator_name: reflection_pad1d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51576,6 +52738,7 @@ | |
operator_name: reflection_pad1d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reflection_pad1d(Tensor self, int[2] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -51611,6 +52774,7 @@ | |
operator_name: reflection_pad1d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51659,6 +52823,7 @@ | |
operator_name: reflection_pad1d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -51699,6 +52864,7 @@ | |
operator_name: reflection_pad2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51742,6 +52908,7 @@ | |
operator_name: reflection_pad2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reflection_pad2d(Tensor self, int[4] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -51777,6 +52944,7 @@ | |
operator_name: reflection_pad2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51825,6 +52993,7 @@ | |
operator_name: reflection_pad2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -51865,6 +53034,7 @@ | |
operator_name: replication_pad1d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51908,6 +53078,7 @@ | |
operator_name: replication_pad1d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad1d(Tensor self, int[2] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -51943,6 +53114,7 @@ | |
operator_name: replication_pad1d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -51991,6 +53163,7 @@ | |
operator_name: replication_pad1d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -52031,6 +53204,7 @@ | |
operator_name: replication_pad2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52074,6 +53248,7 @@ | |
operator_name: replication_pad2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad2d(Tensor self, int[4] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -52109,6 +53284,7 @@ | |
operator_name: replication_pad2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52157,6 +53333,7 @@ | |
operator_name: replication_pad2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -52197,6 +53374,7 @@ | |
operator_name: replication_pad3d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52240,6 +53418,7 @@ | |
operator_name: replication_pad3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad3d(Tensor self, int[6] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -52275,6 +53454,7 @@ | |
operator_name: replication_pad3d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52323,6 +53503,7 @@ | |
operator_name: replication_pad3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor | |
method_prefix_derived: '' | |
@@ -52363,6 +53544,7 @@ | |
operator_name: upsample_linear1d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_linear1d.out(Tensor self, int[1] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52411,6 +53593,7 @@ | |
operator_name: upsample_linear1d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_linear1d(Tensor self, int[1] output_size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -52451,6 +53634,7 @@ | |
operator_name: upsample_linear1d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_linear1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52505,6 +53689,7 @@ | |
operator_name: upsample_linear1d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -52551,6 +53736,7 @@ | |
operator_name: upsample_bilinear2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_bilinear2d.out(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52599,6 +53785,7 @@ | |
operator_name: upsample_bilinear2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -52639,6 +53826,7 @@ | |
operator_name: upsample_bilinear2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52693,6 +53881,7 @@ | |
operator_name: upsample_bilinear2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -52739,6 +53928,7 @@ | |
operator_name: upsample_bicubic2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_bicubic2d.out(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52787,6 +53977,7 @@ | |
operator_name: upsample_bicubic2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -52827,6 +54018,7 @@ | |
operator_name: upsample_bicubic2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52881,6 +54073,7 @@ | |
operator_name: upsample_bicubic2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -52927,6 +54120,7 @@ | |
operator_name: upsample_trilinear3d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -52975,6 +54169,7 @@ | |
operator_name: upsample_trilinear3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -53015,6 +54210,7 @@ | |
operator_name: upsample_trilinear3d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53069,6 +54265,7 @@ | |
operator_name: upsample_trilinear3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners) -> Tensor | |
method_prefix_derived: '' | |
@@ -53115,6 +54312,7 @@ | |
operator_name: upsample_nearest1d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest1d.out(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53158,6 +54356,7 @@ | |
operator_name: upsample_nearest1d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest1d(Tensor self, int[1] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -53193,6 +54392,7 @@ | |
operator_name: upsample_nearest1d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53242,6 +54442,7 @@ | |
operator_name: upsample_nearest1d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -53283,6 +54484,7 @@ | |
operator_name: upsample_nearest2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53326,6 +54528,7 @@ | |
operator_name: upsample_nearest2d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest2d(Tensor self, int[2] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -53361,6 +54564,7 @@ | |
operator_name: upsample_nearest2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53410,6 +54614,7 @@ | |
operator_name: upsample_nearest2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -53451,6 +54656,7 @@ | |
operator_name: upsample_nearest3d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53494,6 +54700,7 @@ | |
operator_name: upsample_nearest3d | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest3d(Tensor self, int[3] output_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -53529,6 +54736,7 @@ | |
operator_name: upsample_nearest3d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53578,6 +54786,7 @@ | |
operator_name: upsample_nearest3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size) -> Tensor | |
method_prefix_derived: '' | |
@@ -53619,6 +54828,7 @@ | |
operator_name: sigmoid_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53661,6 +54871,7 @@ | |
operator_name: sigmoid_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor | |
method_prefix_derived: '' | |
@@ -53695,6 +54906,7 @@ | |
operator_name: tanh_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53737,6 +54949,7 @@ | |
operator_name: tanh_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor | |
method_prefix_derived: '' | |
@@ -53771,6 +54984,7 @@ | |
operator_name: slow_conv_transpose2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -53853,6 +55067,7 @@ | |
operator_name: slow_conv_transpose2d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -53927,6 +55142,7 @@ | |
operator_name: slow_conv_transpose2d_backward | |
overload_name: grad_output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_transpose2d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) | |
method_prefix_derived: '' | |
@@ -54036,6 +55252,7 @@ | |
operator_name: slow_conv_transpose2d_backward | |
overload_name: output_mask | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_transpose2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) | |
method_prefix_derived: '' | |
@@ -54129,6 +55346,7 @@ | |
operator_name: slow_conv_transpose3d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -54211,6 +55429,7 @@ | |
operator_name: slow_conv_transpose3d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -54285,6 +55504,7 @@ | |
operator_name: slow_conv_transpose3d_backward | |
overload_name: grad_output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_transpose3d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) | |
method_prefix_derived: '' | |
@@ -54394,6 +55614,7 @@ | |
operator_name: slow_conv_transpose3d_backward | |
overload_name: output_mask | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_transpose3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) | |
method_prefix_derived: '' | |
@@ -54487,6 +55708,7 @@ | |
operator_name: thnn_conv2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -54555,6 +55777,7 @@ | |
operator_name: thnn_conv2d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -54615,6 +55838,7 @@ | |
operator_name: thnn_conv2d_forward | |
overload_name: output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!)) | |
method_prefix_derived: '' | |
@@ -54702,6 +55926,7 @@ | |
operator_name: thnn_conv2d_forward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input) | |
method_prefix_derived: '' | |
@@ -54768,6 +55993,7 @@ | |
operator_name: thnn_conv2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) | |
method_prefix_derived: '' | |
@@ -54865,6 +56091,7 @@ | |
operator_name: thnn_conv2d_backward | |
overload_name: output_mask | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) | |
method_prefix_derived: '' | |
@@ -54946,6 +56173,7 @@ | |
operator_name: thnn_conv_depthwise2d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -55021,6 +56249,7 @@ | |
operator_name: thnn_conv_depthwise2d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -55088,6 +56317,7 @@ | |
operator_name: thnn_conv_depthwise2d_forward | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv_depthwise2d_forward.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -55159,6 +56389,7 @@ | |
operator_name: thnn_conv_depthwise2d_forward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor | |
method_prefix_derived: '' | |
@@ -55222,6 +56453,7 @@ | |
operator_name: thnn_conv_depthwise2d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv_depthwise2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight) -> (Tensor(a!), Tensor(b!)) | |
method_prefix_derived: '' | |
@@ -55304,6 +56536,7 @@ | |
operator_name: thnn_conv_depthwise2d_backward | |
overload_name: output_mask | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv_depthwise2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[2] output_mask) -> (Tensor grad_input, Tensor grad_weight) | |
method_prefix_derived: '' | |
@@ -55377,6 +56610,7 @@ | |
operator_name: thnn_conv3d | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -55445,6 +56679,7 @@ | |
operator_name: thnn_conv3d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor | |
method_prefix_derived: '' | |
@@ -55505,6 +56740,7 @@ | |
operator_name: thnn_conv3d_forward | |
overload_name: output | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!)) | |
method_prefix_derived: '' | |
@@ -55592,6 +56828,7 @@ | |
operator_name: thnn_conv3d_forward | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input) | |
method_prefix_derived: '' | |
@@ -55658,6 +56895,7 @@ | |
operator_name: thnn_conv3d_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) | |
method_prefix_derived: '' | |
@@ -55755,6 +56993,7 @@ | |
operator_name: thnn_conv3d_backward | |
overload_name: output_mask | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::thnn_conv3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) | |
method_prefix_derived: '' | |
@@ -55836,6 +57075,7 @@ | |
operator_name: slow_conv_dilated2d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -55903,6 +57143,7 @@ | |
operator_name: slow_conv_dilated2d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_dilated2d_backward(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) | |
method_prefix_derived: '' | |
@@ -55980,6 +57221,7 @@ | |
operator_name: slow_conv_dilated3d | |
overload_name: '' | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor | |
method_prefix_derived: '' | |
@@ -56047,6 +57289,7 @@ | |
operator_name: slow_conv_dilated3d_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::slow_conv_dilated3d_backward(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) | |
method_prefix_derived: '' | |
@@ -56124,6 +57367,7 @@ | |
operator_name: col2im | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::col2im.out(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -56191,6 +57435,7 @@ | |
operator_name: col2im | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::col2im(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor | |
method_prefix_derived: '' | |
@@ -56250,6 +57495,7 @@ | |
operator_name: col2im_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::col2im_backward.grad_input(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -56311,6 +57557,7 @@ | |
operator_name: col2im_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::col2im_backward(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor | |
method_prefix_derived: '' | |
@@ -56364,6 +57611,7 @@ | |
operator_name: im2col | |
overload_name: out | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -56425,6 +57673,7 @@ | |
operator_name: im2col | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor | |
method_prefix_derived: '' | |
@@ -56478,6 +57727,7 @@ | |
operator_name: im2col_backward | |
overload_name: grad_input | |
use_c10_dispatcher: false | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::im2col_backward.grad_input(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!) | |
method_prefix_derived: '' | |
@@ -56545,6 +57795,7 @@ | |
operator_name: im2col_backward | |
overload_name: '' | |
use_c10_dispatcher: true | |
+ category_override: '' | |
matches_jit_signature: true | |
schema_string: aten::im2col_backward(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor | |
method_prefix_derived: '' | |
diff --git a/build/aten/src/ATen/Functions.h b/build/aten/src/ATen/Functions.h | |
index e13f80001f..7476df2d5d 100644 | |
--- a/build/aten/src/ATen/Functions.h | |
+++ b/build/aten/src/ATen/Functions.h | |
@@ -219,7 +219,7 @@ static inline Tensor empty(IntArrayRef size, c10::optional<DimnameList> names, c | |
#endif | |
static inline Tensor empty(IntArrayRef size, const TensorOptions & options={}, c10::optional<MemoryFormat> memory_format=c10::nullopt); | |
static inline Tensor _empty_affine_quantized(IntArrayRef size, const TensorOptions & options={}, double scale=1, int64_t zero_point=0, c10::optional<MemoryFormat> memory_format=MemoryFormat::Contiguous); | |
-static inline Tensor _empty_per_channel_affine_quantized_like(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, const TensorOptions & options={}, c10::optional<MemoryFormat> memory_format=MemoryFormat::Contiguous); | |
+static inline Tensor _empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, const TensorOptions & options={}, c10::optional<MemoryFormat> memory_format=MemoryFormat::Contiguous); | |
static inline Tensor & empty_out(Tensor & out, IntArrayRef size, c10::optional<MemoryFormat> memory_format=c10::nullopt); | |
static inline Tensor empty_like(const Tensor & self); | |
static inline Tensor empty_like(const Tensor & self, const TensorOptions & options, c10::optional<MemoryFormat> memory_format=MemoryFormat::Contiguous); | |
@@ -3388,19 +3388,19 @@ static inline Tensor _empty_affine_quantized(IntArrayRef size, const TensorOptio | |
return table->getOp<Tensor (IntArrayRef, const TensorOptions &, double, int64_t, c10::optional<MemoryFormat>)>(options.type_set())(size, options, scale, zero_point, memory_format); | |
#endif | |
} | |
-static inline Tensor _empty_per_channel_affine_quantized_like(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) { | |
+static inline Tensor _empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) { | |
#ifdef USE_STATIC_DISPATCH | |
switch(tensorTypeIdToBackend(impl::dispatchTypeId(options.type_set()))) { | |
case Backend::QuantizedCPU: | |
- return QuantizedCPUType::_empty_per_channel_affine_quantized_like(self, zero_points, size, axis, options, memory_format); | |
+ return QuantizedCPUType::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, options, memory_format); | |
break; | |
default: | |
- AT_ERROR("_empty_per_channel_affine_quantized_like not implemented for ", at::toString(options.type_set())); | |
+ AT_ERROR("_empty_per_channel_affine_quantized not implemented for ", at::toString(options.type_set())); | |
} | |
#else | |
globalLegacyTypeDispatch().initForTensorTypeSet(options.type_set()); | |
- static auto table = globalATenDispatch().getOpTable("aten::_empty_per_channel_affine_quantized_like(Tensor self, Tensor zero_points, int[] size, int[] axis, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor"); | |
- return table->getOp<Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const TensorOptions &, c10::optional<MemoryFormat>)>(options.type_set())(self, zero_points, size, axis, options, memory_format); | |
+ static auto table = globalATenDispatch().getOpTable("aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int[] axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor"); | |
+ return table->getOp<Tensor (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, const TensorOptions &, c10::optional<MemoryFormat>)>(options.type_set())(size, scales, zero_points, axis, options, memory_format); | |
#endif | |
} | |
static inline Tensor & empty_out(Tensor & out, IntArrayRef size, c10::optional<MemoryFormat> memory_format) { | |
diff --git a/build/aten/src/ATen/NativeFunctions.h b/build/aten/src/ATen/NativeFunctions.h | |
index 4df95bff8f..0fc3174ad4 100644 | |
--- a/build/aten/src/ATen/NativeFunctions.h | |
+++ b/build/aten/src/ATen/NativeFunctions.h | |
@@ -319,7 +319,7 @@ CAFFE2_API Tensor empty_sparse(IntArrayRef size, const TensorOptions & options={ | |
CAFFE2_API Tensor new_empty(const Tensor & self, IntArrayRef size, const TensorOptions & options={}); | |
CAFFE2_API Tensor new_full(const Tensor & self, IntArrayRef size, Scalar fill_value, const TensorOptions & options={}); | |
CAFFE2_API Tensor empty_affine_quantized_cpu(IntArrayRef size, const TensorOptions & options={}, double scale=1, int64_t zero_point=0, c10::optional<MemoryFormat> memory_format=MemoryFormat::Contiguous); | |
-CAFFE2_API Tensor empty_per_channel_affine_quantized_cpu(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, const TensorOptions & options={}, c10::optional<MemoryFormat> memory_format=MemoryFormat::Contiguous); | |
+CAFFE2_API Tensor empty_per_channel_affine_quantized_cpu(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, const TensorOptions & options={}, c10::optional<MemoryFormat> memory_format=MemoryFormat::Contiguous); | |
CAFFE2_API Tensor & resize_cpu_(Tensor & self, IntArrayRef size); | |
CAFFE2_API Tensor & resize_cuda_(Tensor & self, IntArrayRef size); | |
CAFFE2_API Tensor & empty_out(Tensor & out, IntArrayRef size, c10::optional<MemoryFormat> memory_format=c10::nullopt); | |
diff --git a/build/aten/src/ATen/QuantizedCPUType.cpp b/build/aten/src/ATen/QuantizedCPUType.cpp | |
index 14f37f9735..5672e9e9e3 100644 | |
--- a/build/aten/src/ATen/QuantizedCPUType.cpp | |
+++ b/build/aten/src/ATen/QuantizedCPUType.cpp | |
@@ -56,14 +56,14 @@ Tensor QuantizedCPUType::_empty_affine_quantized(IntArrayRef size, const TensorO | |
const DeviceGuard device_guard(options.device()); | |
return at::native::empty_affine_quantized_cpu(size, options, scale, zero_point, memory_format); | |
} | |
-Tensor QuantizedCPUType::_empty_per_channel_affine_quantized_like(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) { | |
+Tensor QuantizedCPUType::_empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) { | |
#ifdef BUILD_NAMEDTENSOR | |
- if (self.has_names() || zero_points.has_names()) { | |
- AT_ERROR("_empty_per_channel_affine_quantized_like: no named inference rule implemented."); | |
+ if (scales.has_names() || zero_points.has_names()) { | |
+ AT_ERROR("_empty_per_channel_affine_quantized: no named inference rule implemented."); | |
} | |
#endif | |
const DeviceGuard device_guard(options.device()); | |
- return at::native::empty_per_channel_affine_quantized_cpu(self, zero_points, size, axis, options, memory_format); | |
+ return at::native::empty_per_channel_affine_quantized_cpu(size, scales, zero_points, axis, options, memory_format); | |
} | |
Tensor QuantizedCPUType::quantized_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { | |
#ifdef BUILD_NAMEDTENSOR | |
@@ -476,8 +476,8 @@ static auto registerer = torch::RegisterOperators() | |
.impl_unboxedOnlyKernel<Tensor (IntArrayRef, const TensorOptions &, double, int64_t, c10::optional<MemoryFormat>), &QuantizedCPUType::_empty_affine_quantized>(TensorTypeId::QuantizedCPUTensorId) | |
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)) | |
.op(torch::RegisterOperators::options() | |
- .schema("aten::_empty_per_channel_affine_quantized_like(Tensor self, Tensor zero_points, int[] size, int[] axis, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor") | |
- .impl_unboxedOnlyKernel<Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const TensorOptions &, c10::optional<MemoryFormat>), &QuantizedCPUType::_empty_per_channel_affine_quantized_like>(TensorTypeId::QuantizedCPUTensorId) | |
+ .schema("aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int[] axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor") | |
+ .impl_unboxedOnlyKernel<Tensor (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, const TensorOptions &, c10::optional<MemoryFormat>), &QuantizedCPUType::_empty_per_channel_affine_quantized>(TensorTypeId::QuantizedCPUTensorId) | |
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)) | |
.op(torch::RegisterOperators::options() | |
.schema("aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1) -> Tensor") | |
diff --git a/build/aten/src/ATen/QuantizedCPUType.h b/build/aten/src/ATen/QuantizedCPUType.h | |
index 85667e164e..0e2a6bb817 100644 | |
--- a/build/aten/src/ATen/QuantizedCPUType.h | |
+++ b/build/aten/src/ATen/QuantizedCPUType.h | |
@@ -33,7 +33,7 @@ using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&; | |
struct TORCH_API QuantizedCPUType final { | |
static Tensor as_strided(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset); | |
static Tensor _empty_affine_quantized(IntArrayRef size, const TensorOptions & options, double scale, int64_t zero_point, c10::optional<MemoryFormat> memory_format); | |
- static Tensor _empty_per_channel_affine_quantized_like(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format); | |
+ static Tensor _empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format); | |
static Tensor quantized_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); | |
static Tensor relu(const Tensor & self); | |
static Tensor & relu_(Tensor & self); | |
diff --git a/build/aten/src/ATen/RegistrationDeclarations.h b/build/aten/src/ATen/RegistrationDeclarations.h | |
index 9889d60c14..97d0840c61 100644 | |
--- a/build/aten/src/ATen/RegistrationDeclarations.h | |
+++ b/build/aten/src/ATen/RegistrationDeclarations.h | |
@@ -214,7 +214,7 @@ Tensor empty(IntArrayRef size, const TensorOptions & options, c10::optional<Memo | |
Tensor new_empty(const Tensor & self, IntArrayRef size, const TensorOptions & options); // aten::new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
Tensor new_full(const Tensor & self, IntArrayRef size, Scalar fill_value, const TensorOptions & options); // aten::new_full(Tensor self, int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor | |
Tensor _empty_affine_quantized(IntArrayRef size, const TensorOptions & options, double scale, int64_t zero_point, c10::optional<MemoryFormat> memory_format); // aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor | |
-Tensor _empty_per_channel_affine_quantized_like(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format); // aten::_empty_per_channel_affine_quantized_like(Tensor self, Tensor zero_points, int[] size, int[] axis, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor | |
+Tensor _empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format); // aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int[] axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor | |
Tensor & resize_(Tensor & self, IntArrayRef size); // aten::resize_(Tensor(a!) self, int[] size) -> Tensor(a!) | |
Tensor & empty_out(Tensor & out, IntArrayRef size, c10::optional<MemoryFormat> memory_format); // aten::empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) | |
Tensor empty_like(const Tensor & self); // aten::empty_like(Tensor self) -> Tensor | |
diff --git a/torch/csrc/autograd/generated/VariableType.h b/torch/csrc/autograd/generated/VariableType.h | |
index 975338033b..e305f719b0 100644 | |
--- a/torch/csrc/autograd/generated/VariableType.h | |
+++ b/torch/csrc/autograd/generated/VariableType.h | |
@@ -125,7 +125,7 @@ struct TORCH_API VariableType final { | |
static Tensor _embedding_bag_per_sample_weights_backward(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode) ; | |
static Tensor _embedding_bag_sparse_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights) ; | |
static Tensor _empty_affine_quantized(IntArrayRef size, const TensorOptions & options, double scale, int64_t zero_point, c10::optional<MemoryFormat> memory_format) ; | |
- static Tensor _empty_per_channel_affine_quantized_like(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) ; | |
+ static Tensor _empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) ; | |
static Tensor _fft_with_size(const Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntArrayRef checked_signal_sizes, bool normalized, bool onesided, IntArrayRef output_sizes) ; | |
static std::tuple<Tensor,Tensor> _fused_dropout(const Tensor & self, double p, Generator * generator) ; | |
static Tensor _gather_sparse_backward(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & grad) ; | |
diff --git a/torch/csrc/autograd/generated/VariableTypeEverything.cpp b/torch/csrc/autograd/generated/VariableTypeEverything.cpp | |
index 224f10aa27..136f354444 100644 | |
--- a/torch/csrc/autograd/generated/VariableTypeEverything.cpp | |
+++ b/torch/csrc/autograd/generated/VariableTypeEverything.cpp | |
@@ -3589,27 +3589,27 @@ Tensor VariableType::_empty_affine_quantized(IntArrayRef size, const TensorOptio | |
} | |
return result; | |
} | |
-Tensor VariableType::_empty_per_channel_affine_quantized_like(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) { | |
- RECORD_FUNCTION("_empty_per_channel_affine_quantized_like", std::vector<c10::IValue>({self, zero_points}), Node::peek_at_next_sequence_nr()); | |
- auto& self_ = unpack(self, "self", 0); | |
- auto& zero_points_ = unpack(zero_points, "zero_points", 1); | |
+Tensor VariableType::_empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) { | |
+ RECORD_FUNCTION("_empty_per_channel_affine_quantized", std::vector<c10::IValue>({scales, zero_points}), Node::peek_at_next_sequence_nr()); | |
+ auto& scales_ = unpack(scales, "scales", 1); | |
+ auto& zero_points_ = unpack(zero_points, "zero_points", 2); | |
auto options_ = TensorOptions(options).is_variable(false); | |
std::shared_ptr<NotImplemented> grad_fn; | |
- if (compute_requires_grad( self, zero_points )) { | |
- grad_fn = std::shared_ptr<NotImplemented>(new NotImplemented("_empty_per_channel_affine_quantized_like"), deleteNode); | |
- grad_fn->set_next_edges(collect_next_edges( self, zero_points )); | |
+ if (compute_requires_grad( scales, zero_points )) { | |
+ grad_fn = std::shared_ptr<NotImplemented>(new NotImplemented("_empty_per_channel_affine_quantized"), deleteNode); | |
+ grad_fn->set_next_edges(collect_next_edges( scales, zero_points )); | |
} | |
torch::jit::Node* node = nullptr; | |
std::shared_ptr<jit::tracer::TracingState> tracer_state; | |
if (jit::tracer::isTracing()) { | |
tracer_state = jit::tracer::getTracingState(); | |
at::Symbol op_name; | |
- op_name = jit::Symbol::fromQualString("aten::_empty_per_channel_affine_quantized_like"); | |
+ op_name = jit::Symbol::fromQualString("aten::_empty_per_channel_affine_quantized"); | |
node = tracer_state->graph->create(op_name, /*num_outputs=*/0); | |
jit::tracer::recordSourceLocation(node); | |
- jit::tracer::addInputs(node, "self", self); | |
- jit::tracer::addInputs(node, "zero_points", zero_points); | |
jit::tracer::addInputs(node, "size", size); | |
+ jit::tracer::addInputs(node, "scales", scales); | |
+ jit::tracer::addInputs(node, "zero_points", zero_points); | |
jit::tracer::addInputs(node, "axis", axis); | |
jit::tracer::addInputs(node, "options", options); | |
jit::tracer::addInputs(node, "memory_format", memory_format); | |
@@ -3618,10 +3618,10 @@ Tensor VariableType::_empty_per_channel_affine_quantized_like(const Tensor & sel | |
jit::tracer::setTracingState(nullptr); | |
} | |
#ifndef NDEBUG | |
- c10::optional<Storage> self__storage_saved = | |
- self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
- if (self_.defined()) self__impl_saved = self_.getIntrusivePtr(); | |
+ c10::optional<Storage> scales__storage_saved = | |
+ scales_.has_storage() ? c10::optional<Storage>(scales_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> scales__impl_saved; | |
+ if (scales_.defined()) scales__impl_saved = scales_.getIntrusivePtr(); | |
c10::optional<Storage> zero_points__storage_saved = | |
zero_points_.has_storage() ? c10::optional<Storage>(zero_points_.storage()) : c10::nullopt; | |
c10::intrusive_ptr<TensorImpl> zero_points__impl_saved; | |
@@ -3629,13 +3629,13 @@ Tensor VariableType::_empty_per_channel_affine_quantized_like(const Tensor & sel | |
#endif | |
auto tmp = ([&]() { | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- return at::_empty_per_channel_affine_quantized_like(self_, zero_points_, size, axis, options_, memory_format); | |
+ return at::_empty_per_channel_affine_quantized(size, scales_, zero_points_, axis, options_, memory_format); | |
})(); | |
auto result = as_variable(std::move(tmp)); | |
#ifndef NDEBUG | |
- if (self__storage_saved.has_value()) | |
- AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
- if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
+ if (scales__storage_saved.has_value()) | |
+ AT_ASSERT(scales__storage_saved.value().is_alias_of(scales_.storage())); | |
+ if (scales__impl_saved) AT_ASSERT(scales__impl_saved == scales_.getIntrusivePtr()); | |
if (zero_points__storage_saved.has_value()) | |
AT_ASSERT(zero_points__storage_saved.value().is_alias_of(zero_points_.storage())); | |
if (zero_points__impl_saved) AT_ASSERT(zero_points__impl_saved == zero_points_.getIntrusivePtr()); | |
@@ -62493,8 +62493,8 @@ static auto registerer = torch::RegisterOperators() | |
.impl_unboxedOnlyKernel<Tensor (IntArrayRef, const TensorOptions &, double, int64_t, c10::optional<MemoryFormat>), &VariableType::_empty_affine_quantized>(TensorTypeId::VariableTensorId) | |
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)) | |
.op(torch::RegisterOperators::options() | |
- .schema("aten::_empty_per_channel_affine_quantized_like(Tensor self, Tensor zero_points, int[] size, int[] axis, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor") | |
- .impl_unboxedOnlyKernel<Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const TensorOptions &, c10::optional<MemoryFormat>), &VariableType::_empty_per_channel_affine_quantized_like>(TensorTypeId::VariableTensorId) | |
+ .schema("aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int[] axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor") | |
+ .impl_unboxedOnlyKernel<Tensor (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, const TensorOptions &, c10::optional<MemoryFormat>), &VariableType::_empty_per_channel_affine_quantized>(TensorTypeId::VariableTensorId) | |
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)) | |
.op(torch::RegisterOperators::options() | |
.schema("aten::_fft_with_size(Tensor self, int signal_ndim, bool complex_input, bool complex_output, bool inverse, int[] checked_signal_sizes, bool normalized, bool onesided, int[] output_sizes) -> Tensor") | |
diff --git a/torch/csrc/autograd/generated/VariableType_3.cpp b/torch/csrc/autograd/generated/VariableType_3.cpp | |
index 43df709a28..c084515867 100644 | |
--- a/torch/csrc/autograd/generated/VariableType_3.cpp | |
+++ b/torch/csrc/autograd/generated/VariableType_3.cpp | |
@@ -398,6 +398,66 @@ Tensor VariableType::_embedding_bag_per_sample_weights_backward(const Tensor & g | |
} | |
return result; | |
} | |
+Tensor VariableType::_empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) { | |
+ RECORD_FUNCTION("_empty_per_channel_affine_quantized", std::vector<c10::IValue>({scales, zero_points}), Node::peek_at_next_sequence_nr()); | |
+ auto& scales_ = unpack(scales, "scales", 1); | |
+ auto& zero_points_ = unpack(zero_points, "zero_points", 2); | |
+ auto options_ = TensorOptions(options).is_variable(false); | |
+ std::shared_ptr<NotImplemented> grad_fn; | |
+ if (compute_requires_grad( scales, zero_points )) { | |
+ grad_fn = std::shared_ptr<NotImplemented>(new NotImplemented("_empty_per_channel_affine_quantized"), deleteNode); | |
+ grad_fn->set_next_edges(collect_next_edges( scales, zero_points )); | |
+ } | |
+ torch::jit::Node* node = nullptr; | |
+ std::shared_ptr<jit::tracer::TracingState> tracer_state; | |
+ if (jit::tracer::isTracing()) { | |
+ tracer_state = jit::tracer::getTracingState(); | |
+ at::Symbol op_name; | |
+ op_name = jit::Symbol::fromQualString("aten::_empty_per_channel_affine_quantized"); | |
+ node = tracer_state->graph->create(op_name, /*num_outputs=*/0); | |
+ jit::tracer::recordSourceLocation(node); | |
+ jit::tracer::addInputs(node, "size", size); | |
+ jit::tracer::addInputs(node, "scales", scales); | |
+ jit::tracer::addInputs(node, "zero_points", zero_points); | |
+ jit::tracer::addInputs(node, "axis", axis); | |
+ jit::tracer::addInputs(node, "options", options); | |
+ jit::tracer::addInputs(node, "memory_format", memory_format); | |
+ tracer_state->graph->insertNode(node); | |
+ | |
+ jit::tracer::setTracingState(nullptr); | |
+ } | |
+ #ifndef NDEBUG | |
+ c10::optional<Storage> scales__storage_saved = | |
+ scales_.has_storage() ? c10::optional<Storage>(scales_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> scales__impl_saved; | |
+ if (scales_.defined()) scales__impl_saved = scales_.getIntrusivePtr(); | |
+ c10::optional<Storage> zero_points__storage_saved = | |
+ zero_points_.has_storage() ? c10::optional<Storage>(zero_points_.storage()) : c10::nullopt; | |
+ c10::intrusive_ptr<TensorImpl> zero_points__impl_saved; | |
+ if (zero_points_.defined()) zero_points__impl_saved = zero_points_.getIntrusivePtr(); | |
+ #endif | |
+ auto tmp = ([&]() { | |
+ at::AutoNonVariableTypeMode non_var_type_mode(true); | |
+ return at::_empty_per_channel_affine_quantized(size, scales_, zero_points_, axis, options_, memory_format); | |
+ })(); | |
+ auto result = as_variable(std::move(tmp)); | |
+ #ifndef NDEBUG | |
+ if (scales__storage_saved.has_value()) | |
+ AT_ASSERT(scales__storage_saved.value().is_alias_of(scales_.storage())); | |
+ if (scales__impl_saved) AT_ASSERT(scales__impl_saved == scales_.getIntrusivePtr()); | |
+ if (zero_points__storage_saved.has_value()) | |
+ AT_ASSERT(zero_points__storage_saved.value().is_alias_of(zero_points_.storage())); | |
+ if (zero_points__impl_saved) AT_ASSERT(zero_points__impl_saved == zero_points_.getIntrusivePtr()); | |
+ #endif | |
+ if (grad_fn) { | |
+ set_history(flatten_tensor_args( result ), grad_fn); | |
+ } | |
+ if (tracer_state) { | |
+ jit::tracer::setTracingState(std::move(tracer_state)); | |
+ jit::tracer::addOutput(node, result); | |
+ } | |
+ return result; | |
+} | |
Tensor VariableType::_log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self) { | |
RECORD_FUNCTION("_log_softmax_backward_data", std::vector<c10::IValue>({grad_output, output, self}), Node::peek_at_next_sequence_nr()); | |
auto& grad_output_ = unpack(grad_output, "grad_output", 0); | |
@@ -11607,6 +11667,10 @@ static auto registerer = torch::RegisterOperators() | |
.schema("aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode) -> Tensor") | |
.impl_unboxedOnlyKernel<Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t), &VariableType::_embedding_bag_per_sample_weights_backward>(TensorTypeId::VariableTensorId) | |
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)) | |
+ .op(torch::RegisterOperators::options() | |
+ .schema("aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int[] axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor") | |
+ .impl_unboxedOnlyKernel<Tensor (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, const TensorOptions &, c10::optional<MemoryFormat>), &VariableType::_empty_per_channel_affine_quantized>(TensorTypeId::VariableTensorId) | |
+ .aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)) | |
.op(torch::RegisterOperators::options() | |
.schema("aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor") | |
.impl_unboxedOnlyKernel<Tensor (const Tensor &, const Tensor &, int64_t, const Tensor &), &VariableType::_log_softmax_backward_data>(TensorTypeId::VariableTensorId) | |
diff --git a/torch/csrc/autograd/generated/VariableType_4.cpp b/torch/csrc/autograd/generated/VariableType_4.cpp | |
index 13fac6ac47..1e162c10e9 100644 | |
--- a/torch/csrc/autograd/generated/VariableType_4.cpp | |
+++ b/torch/csrc/autograd/generated/VariableType_4.cpp | |
@@ -495,66 +495,6 @@ void VariableType::_cufft_set_plan_cache_max_size(int64_t device_index, int64_t | |
RECORD_FUNCTION("_cufft_set_plan_cache_max_size", std::vector<c10::IValue>({}), Node::peek_at_next_sequence_nr()); | |
TypeDefault::_cufft_set_plan_cache_max_size(device_index, max_size); | |
} | |
-Tensor VariableType::_empty_per_channel_affine_quantized_like(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format) { | |
- RECORD_FUNCTION("_empty_per_channel_affine_quantized_like", std::vector<c10::IValue>({self, zero_points}), Node::peek_at_next_sequence_nr()); | |
- auto& self_ = unpack(self, "self", 0); | |
- auto& zero_points_ = unpack(zero_points, "zero_points", 1); | |
- auto options_ = TensorOptions(options).is_variable(false); | |
- std::shared_ptr<NotImplemented> grad_fn; | |
- if (compute_requires_grad( self, zero_points )) { | |
- grad_fn = std::shared_ptr<NotImplemented>(new NotImplemented("_empty_per_channel_affine_quantized_like"), deleteNode); | |
- grad_fn->set_next_edges(collect_next_edges( self, zero_points )); | |
- } | |
- torch::jit::Node* node = nullptr; | |
- std::shared_ptr<jit::tracer::TracingState> tracer_state; | |
- if (jit::tracer::isTracing()) { | |
- tracer_state = jit::tracer::getTracingState(); | |
- at::Symbol op_name; | |
- op_name = jit::Symbol::fromQualString("aten::_empty_per_channel_affine_quantized_like"); | |
- node = tracer_state->graph->create(op_name, /*num_outputs=*/0); | |
- jit::tracer::recordSourceLocation(node); | |
- jit::tracer::addInputs(node, "self", self); | |
- jit::tracer::addInputs(node, "zero_points", zero_points); | |
- jit::tracer::addInputs(node, "size", size); | |
- jit::tracer::addInputs(node, "axis", axis); | |
- jit::tracer::addInputs(node, "options", options); | |
- jit::tracer::addInputs(node, "memory_format", memory_format); | |
- tracer_state->graph->insertNode(node); | |
- | |
- jit::tracer::setTracingState(nullptr); | |
- } | |
- #ifndef NDEBUG | |
- c10::optional<Storage> self__storage_saved = | |
- self_.has_storage() ? c10::optional<Storage>(self_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> self__impl_saved; | |
- if (self_.defined()) self__impl_saved = self_.getIntrusivePtr(); | |
- c10::optional<Storage> zero_points__storage_saved = | |
- zero_points_.has_storage() ? c10::optional<Storage>(zero_points_.storage()) : c10::nullopt; | |
- c10::intrusive_ptr<TensorImpl> zero_points__impl_saved; | |
- if (zero_points_.defined()) zero_points__impl_saved = zero_points_.getIntrusivePtr(); | |
- #endif | |
- auto tmp = ([&]() { | |
- at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- return at::_empty_per_channel_affine_quantized_like(self_, zero_points_, size, axis, options_, memory_format); | |
- })(); | |
- auto result = as_variable(std::move(tmp)); | |
- #ifndef NDEBUG | |
- if (self__storage_saved.has_value()) | |
- AT_ASSERT(self__storage_saved.value().is_alias_of(self_.storage())); | |
- if (self__impl_saved) AT_ASSERT(self__impl_saved == self_.getIntrusivePtr()); | |
- if (zero_points__storage_saved.has_value()) | |
- AT_ASSERT(zero_points__storage_saved.value().is_alias_of(zero_points_.storage())); | |
- if (zero_points__impl_saved) AT_ASSERT(zero_points__impl_saved == zero_points_.getIntrusivePtr()); | |
- #endif | |
- if (grad_fn) { | |
- set_history(flatten_tensor_args( result ), grad_fn); | |
- } | |
- if (tracer_state) { | |
- jit::tracer::setTracingState(std::move(tracer_state)); | |
- jit::tracer::addOutput(node, result); | |
- } | |
- return result; | |
-} | |
Tensor & VariableType::_index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { | |
RECORD_FUNCTION("_index_copy_", std::vector<c10::IValue>({self, index, source}), Node::peek_at_next_sequence_nr()); | |
auto& self_ = unpack(self, "self", 0); | |
@@ -12217,10 +12157,6 @@ static auto registerer = torch::RegisterOperators() | |
.schema("aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> void") | |
.impl_unboxedOnlyKernel<void (int64_t, int64_t), &VariableType::_cufft_set_plan_cache_max_size>(TensorTypeId::VariableTensorId) | |
.aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)) | |
- .op(torch::RegisterOperators::options() | |
- .schema("aten::_empty_per_channel_affine_quantized_like(Tensor self, Tensor zero_points, int[] size, int[] axis, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor") | |
- .impl_unboxedOnlyKernel<Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const TensorOptions &, c10::optional<MemoryFormat>), &VariableType::_empty_per_channel_affine_quantized_like>(TensorTypeId::VariableTensorId) | |
- .aliasAnalysis(c10::AliasAnalysisKind::FROM_SCHEMA)) | |
.op(torch::RegisterOperators::options() | |
.schema("aten::_index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)") | |
.impl_unboxedOnlyKernel<Tensor & (Tensor &, int64_t, const Tensor &, const Tensor &), &VariableType::_index_copy_>(TensorTypeId::VariableTensorId) | |
diff --git a/torch/csrc/autograd/generated/python_torch_functions.cpp b/torch/csrc/autograd/generated/python_torch_functions.cpp | |
index d01d03d459..e31be8883a 100644 | |
--- a/torch/csrc/autograd/generated/python_torch_functions.cpp | |
+++ b/torch/csrc/autograd/generated/python_torch_functions.cpp | |
@@ -1124,31 +1124,31 @@ static PyObject * THPVariable__empty_affine_quantized(PyObject* self_, PyObject* | |
Py_RETURN_NONE; | |
END_HANDLE_TH_ERRORS | |
} | |
-static PyObject * THPVariable__empty_per_channel_affine_quantized_like(PyObject* self_, PyObject* args, PyObject* kwargs) | |
+static PyObject * THPVariable__empty_per_channel_affine_quantized(PyObject* self_, PyObject* args, PyObject* kwargs) | |
{ | |
HANDLE_TH_ERRORS | |
static PythonArgParser parser({ | |
- "_empty_per_channel_affine_quantized_like(Tensor input, Tensor zero_points, IntArrayRef size, IntArrayRef axis, *, MemoryFormat? memory_format=MemoryFormat::Contiguous, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool pin_memory=False, bool requires_grad=False)", | |
+ "_empty_per_channel_affine_quantized(IntArrayRef size, *, Tensor scales, Tensor zero_points, IntArrayRef axis, MemoryFormat? memory_format=MemoryFormat::Contiguous, ScalarType dtype=None, Layout layout=torch.strided, Device device=None, bool pin_memory=False, bool requires_grad=False)", | |
}, /*traceable=*/true); | |
ParsedArgs<11> parsed_args; | |
auto r = parser.parse(args, kwargs, parsed_args); | |
if (r.idx == 0) { | |
- auto self = r.tensor(0); | |
- auto zero_points = r.tensor(1); | |
- auto size = r.intlist(2); | |
+ auto size = r.intlist(0); | |
+ auto scales = r.tensor(1); | |
+ auto zero_points = r.tensor(2); | |
auto axis = r.intlist(3); | |
auto memory_format = r.memoryformatOptional(4); | |
- auto dtype = r.scalartypeWithDefault(5, self.scalar_type()); | |
- auto device = r.deviceWithDefault(7, self.device()); | |
+ auto dtype = r.scalartype(5); | |
+ auto device = r.device(7); | |
const auto options = TensorOptions() | |
.dtype(dtype) | |
.device(device) | |
- .layout(r.layoutWithDefault(6, *torch::getLayout(self.type().backend())).layout) | |
+ .layout(r.layout(6).layout) | |
.requires_grad(r.toBool(9)) | |
.pinned_memory(r.toBool(8)); | |
- return wrap(dispatch__empty_per_channel_affine_quantized_like(self, zero_points, size, axis, memory_format, options)); | |
+ return wrap(dispatch__empty_per_channel_affine_quantized(size, scales, zero_points, axis, memory_format, options)); | |
} | |
Py_RETURN_NONE; | |
END_HANDLE_TH_ERRORS | |
@@ -10219,7 +10219,7 @@ static PyMethodDef torch_functions[] = { | |
{"_dirichlet_grad", (PyCFunction)(void(*)(void))THPVariable__dirichlet_grad, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL}, | |
{"_embedding_bag", (PyCFunction)(void(*)(void))THPVariable__embedding_bag, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL}, | |
{"_empty_affine_quantized", (PyCFunction)(void(*)(void))THPVariable__empty_affine_quantized, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL}, | |
- {"_empty_per_channel_affine_quantized_like", (PyCFunction)(void(*)(void))THPVariable__empty_per_channel_affine_quantized_like, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL}, | |
+ {"_empty_per_channel_affine_quantized", (PyCFunction)(void(*)(void))THPVariable__empty_per_channel_affine_quantized, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL}, | |
{"_fft_with_size", (PyCFunction)(void(*)(void))THPVariable__fft_with_size, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL}, | |
{"_fused_dropout", (PyCFunction)(void(*)(void))THPVariable__fused_dropout, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL}, | |
{"_has_compatible_shallow_copy_type", (PyCFunction)(void(*)(void))THPVariable__has_compatible_shallow_copy_type, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL}, | |
diff --git a/torch/csrc/autograd/generated/python_torch_functions_dispatch.h b/torch/csrc/autograd/generated/python_torch_functions_dispatch.h | |
index 8eef92c89a..1199706808 100644 | |
--- a/torch/csrc/autograd/generated/python_torch_functions_dispatch.h | |
+++ b/torch/csrc/autograd/generated/python_torch_functions_dispatch.h | |
@@ -258,10 +258,10 @@ inline Tensor dispatch__empty_affine_quantized(IntArrayRef size, double scale, i | |
AutoNoGIL no_gil; | |
return torch::_empty_affine_quantized(size, options, scale, zero_point, memory_format); | |
} | |
-inline Tensor dispatch__empty_per_channel_affine_quantized_like(const Tensor & self, const Tensor & zero_points, IntArrayRef size, IntArrayRef axis, c10::optional<MemoryFormat> memory_format, const TensorOptions & options) { | |
+inline Tensor dispatch__empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, IntArrayRef axis, c10::optional<MemoryFormat> memory_format, const TensorOptions & options) { | |
torch::utils::maybe_initialize_cuda(options); | |
AutoNoGIL no_gil; | |
- return torch::_empty_per_channel_affine_quantized_like(self, zero_points, size, axis, options, memory_format); | |
+ return torch::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, options, memory_format); | |
} | |
inline Tensor dispatch__fft_with_size(const Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntArrayRef checked_signal_sizes, bool normalized, bool onesided, IntArrayRef output_sizes) { | |
diff --git a/torch/csrc/autograd/generated/variable_factories.h b/torch/csrc/autograd/generated/variable_factories.h | |
index 91232fc200..0589af2662 100644 | |
--- a/torch/csrc/autograd/generated/variable_factories.h | |
+++ b/torch/csrc/autograd/generated/variable_factories.h | |
@@ -429,18 +429,18 @@ inline at::Tensor _empty_affine_quantized(at::IntArrayRef size, const at::Tensor | |
} | |
return result; | |
} | |
-inline at::Tensor _empty_per_channel_affine_quantized_like(const at::Tensor & self, const at::Tensor & zero_points, at::IntArrayRef size, at::IntArrayRef axis, const at::TensorOptions & options = {}, c10::optional<MemoryFormat> memory_format = MemoryFormat::Contiguous) { | |
+inline at::Tensor _empty_per_channel_affine_quantized(at::IntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, at::IntArrayRef axis, const at::TensorOptions & options = {}, c10::optional<MemoryFormat> memory_format = MemoryFormat::Contiguous) { | |
torch::jit::Node* node = nullptr; | |
std::shared_ptr<jit::tracer::TracingState> tracer_state; | |
if (jit::tracer::isTracing()) { | |
tracer_state = jit::tracer::getTracingState(); | |
at::Symbol op_name; | |
- op_name = jit::Symbol::fromQualString("aten::_empty_per_channel_affine_quantized_like"); | |
+ op_name = jit::Symbol::fromQualString("aten::_empty_per_channel_affine_quantized"); | |
node = tracer_state->graph->create(op_name, /*num_outputs=*/0); | |
jit::tracer::recordSourceLocation(node); | |
- jit::tracer::addInputs(node, "self", self); | |
- jit::tracer::addInputs(node, "zero_points", zero_points); | |
jit::tracer::addInputs(node, "size", size); | |
+ jit::tracer::addInputs(node, "scales", scales); | |
+ jit::tracer::addInputs(node, "zero_points", zero_points); | |
jit::tracer::addInputs(node, "axis", axis); | |
jit::tracer::addInputs(node, "options", options); | |
jit::tracer::addInputs(node, "memory_format", memory_format); | |
@@ -450,7 +450,7 @@ inline at::Tensor _empty_per_channel_affine_quantized_like(const at::Tensor & se | |
} | |
at::Tensor tensor = ([&]() { | |
at::AutoNonVariableTypeMode non_var_type_mode(true); | |
- return at::_empty_per_channel_affine_quantized_like(self, zero_points, size, axis, at::TensorOptions(options).is_variable(false), memory_format); | |
+ return at::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, at::TensorOptions(options).is_variable(false), memory_format); | |
})(); | |
at::Tensor result = | |
autograd::make_variable(std::move(tensor), /*requires_grad=*/options.requires_grad()); | |
diff --git a/torch/csrc/jit/generated/register_aten_ops_1.cpp b/torch/csrc/jit/generated/register_aten_ops_1.cpp | |
index 3542aa781d..1126fa1d11 100644 | |
--- a/torch/csrc/jit/generated/register_aten_ops_1.cpp | |
+++ b/torch/csrc/jit/generated/register_aten_ops_1.cpp | |
@@ -470,7 +470,7 @@ RegisterOperators reg( | |
atenOperatorOptions() | |
), | |
Operator( | |
- "aten::_empty_per_channel_affine_quantized_like(Tensor self, Tensor zero_points, int[] size, int[] axis, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor", | |
+ "aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int[] axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=contiguous_format) -> Tensor", | |
[](Stack & stack) { | |
const auto options = TensorOptions() | |
@@ -479,16 +479,16 @@ RegisterOperators reg( | |
.device((std::move(peek(stack, 6, 9))).toOptional<c10::Device>()) | |
.pinned_memory((std::move(peek(stack, 7, 9))).toOptional<bool>()); | |
#ifdef USE_STATIC_DISPATCH | |
- auto result_ = at::_empty_per_channel_affine_quantized_like((std::move(peek(stack, 0, 9))).toTensor(), | |
+ auto result_ = at::_empty_per_channel_affine_quantized((std::move(peek(stack, 0, 9))).toIntListRef(), | |
(std::move(peek(stack, 1, 9))).toTensor(), | |
- (std::move(peek(stack, 2, 9))).toIntListRef(), | |
+ (std::move(peek(stack, 2, 9))).toTensor(), | |
(std::move(peek(stack, 3, 9))).toIntListRef(), | |
options, | |
(std::move(peek(stack, 8, 9))).toOptional<c10::MemoryFormat>()); | |
#else | |
- auto result_ = torch::_empty_per_channel_affine_quantized_like((std::move(peek(stack, 0, 9))).toTensor(), | |
+ auto result_ = torch::_empty_per_channel_affine_quantized((std::move(peek(stack, 0, 9))).toIntListRef(), | |
(std::move(peek(stack, 1, 9))).toTensor(), | |
- (std::move(peek(stack, 2, 9))).toIntListRef(), | |
+ (std::move(peek(stack, 2, 9))).toTensor(), | |
(std::move(peek(stack, 3, 9))).toIntListRef(), | |
options, | |
(std::move(peek(stack, 8, 9))).toOptional<c10::MemoryFormat>()); |
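
For context on what this patch changes at the call site: the per-channel quantized factory is renamed from _empty_per_channel_affine_quantized_like to _empty_per_channel_affine_quantized, and now takes size first with scales, zero_points, and axis as keyword-only arguments (see the aten::_empty_per_channel_affine_quantized schema strings above). A minimal Python usage sketch following that schema; this assumes a build carrying this patch, the op is an internal torch._* factory, and argument validation (e.g. accepted scale dtypes, axis being int[] vs int) may differ between builds:

import torch

# One (scale, zero_point) pair per slice along the quantization axis.
scales = torch.tensor([0.1, 0.2, 0.3], dtype=torch.double)
zero_points = torch.tensor([0, 5, 10], dtype=torch.long)

# New calling convention from this patch: size is positional, the
# per-channel parameters are keyword-only, and axis is int[] here.
qt = torch._empty_per_channel_affine_quantized(
    [3, 4],                  # 3 channels along axis 0, 4 elements each
    scales=scales,
    zero_points=zero_points,
    axis=[0],
    dtype=torch.qint8,
)
print(qt.shape)  # torch.Size([3, 4]); contents are uninitialized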