bloom_genshark_debug.txt
(Note: this log has been truncated; only the beginning of the conversion trace is shown below.)
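The trace below is the raw output of MLIR's dialect-conversion debug logging. As a minimal sketch of how to regenerate a trace like this (assuming the torch-mlir build tree at ./../torch-mlir/build and the input module at ./shark_tmp/_lambda.mlir from the Args line below), LLVM's debug-type filter can be used to keep only the conversion driver's messages and drop the dialect-loading and TypeID-registration noise:

    ./../torch-mlir/build/bin/torch-mlir-opt \
        -pass-pipeline=torch-backend-to-linalg-on-tensors-backend-pipeline \
        --debug-only=dialect-conversion \
        ./shark_tmp/_lambda.mlir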
Args: ./../torch-mlir/build/bin/torch-mlir-opt -pass-pipeline=torch-backend-to-linalg-on-tensors-backend-pipeline --debug ./shark_tmp/_lambda.mlir
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementTypeInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ShapedType)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemRefLayoutAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SubElementAttrInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ElementsAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TypedAttr)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpAsmOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionKindInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CastOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemoryEffectOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ResourceBlobManagerDialectInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpAsmDialectInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::BytecodeDialectInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::AffineBinaryOpExprStorage)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::AffineConstantExprStorage)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::AffineDimExprStorage)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::AffineMapStorage)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::IntegerSetStorage)
Load new dialect in Context builtin
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DebugActionManager::GenericHandler)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ZeroOperands<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneRegion<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ZeroResults<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ZeroSuccessors<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NoRegionArguments<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NoTerminator<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::SingleBlock<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OpInvariants<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::AffineScope<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::IsIsolatedFromAbove<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::SymbolTable<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpAsmOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionKindInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::HasOnlyGraphRegion<Empty>)
Load new dialect in Context torch
ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferTypeOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::SymbolUserOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionBranchOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionBranchTerminatorOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DialectInlinerInterface)
Load new dialect in Context func
Load new dialect in Context cf
Load new dialect in Context arith
ImplicitTypeIDRegistry::lookupOrInsert(mlir::VectorUnrollOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferIntRangeInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::bufferization::BufferizableOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::BranchOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CallOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CallableOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::FunctionOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::AutomaticAllocationScope<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CallableOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::FunctionOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ZeroRegions<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneResult<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::IntType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ConstantLike<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::MemoryEffectOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferTypeOpInterface::Trait<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::BoolType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::ValueTensorType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DialectResourceBlobHandle<mlir::BuiltinDialect>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::NoneType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::FloatType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::DeviceType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::torch::Torch::ListType>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::VariadicOperands<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::OpTrait::AllowsTypeRefinement<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneTypedResult<mlir::Type>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<2>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::OpTrait::ReadOnly<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<5>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::Torch::OpTrait::HasValueSemantics<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<4>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<3>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::OneOperand<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<7>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NOperands<6>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::NResults<2>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::HasParent<mlir::func::FuncOp>::Impl<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::MemRefsNormalizable<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::ReturnLike<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::IsTerminator<Empty>)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::DataLayoutSpecInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::detail::OpToOpPassAdaptor)
Load new dialect in Context affine
ImplicitTypeIDRegistry::lookupOrInsert(mlir::AffineDmaStartOp)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::AffineMapAccessInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::AffineDmaWaitOp)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::LoopLikeOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::AffineReadOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::AffineWriteOpInterface)
Load new dialect in Context linalg
Load new dialect in Context math
Load new dialect in Context memref
ImplicitTypeIDRegistry::lookupOrInsert(mlir::CopyOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ViewLikeOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::OffsetSizeAndStrideOpInterface)
Load new dialect in Context tensor
Load new dialect in Context complex
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ReifyRankedShapedTypeOpInterface)
Load new dialect in Context scf
ImplicitTypeIDRegistry::lookupOrInsert(mlir::ParallelCombiningOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::TilingInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::DestinationStyleOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::LinalgOp)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::ContractionOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::ConvolutionOpInterface)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::FillOpInterface)
Ignoring repeated interface registration
Load new dialect in Context tm_tensor
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::TMTensor::TMTensorOp)
ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::TMTensor::ScalarLoopOpInterface)
Load new dialect in Context torch_c
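For context on the "marked legal by the target" lines that follow: in MLIR's dialect conversion framework, the pass configures a ConversionTarget, and any operation the target declares legal is left untouched rather than rewritten. A minimal C++ sketch of that mechanism (illustrative only; the helper name and the particular set of legal ops are assumptions, not the actual torch-mlir pass source):

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Dialect/Linalg/IR/Linalg.h"
    #include "mlir/Dialect/Tensor/IR/Tensor.h"
    #include "mlir/Transforms/DialectConversion.h"

    // Hypothetical helper: declare what the conversion may leave as-is.
    // Ops covered by these declarations show up in the --debug trace as
    // "SUCCESS : operation marked legal by the target".
    static void configureTarget(mlir::ConversionTarget &target) {
      // Whole dialects can be declared legal at once...
      target.addLegalDialect<mlir::linalg::LinalgDialect,
                             mlir::tensor::TensorDialect>();
      // ...or individual operations, as with func.func here.
      target.addLegalOp<mlir::func::FuncOp>();
    }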
//===-------------------------------------------===//
Legalizing operation : 'func.func'(0x872ee80) {
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x8675d90) {
  %0 = "torch.constant.int"() {value = 1024 : i64} : () -> !torch.int
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x8675bf0) {
  %1 = "torch.constant.int"() {value = 128 : i64} : () -> !torch.int
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.int'(0x86534f0) {
  %2 = "torch.constant.int"() {value = 1 : i64} : () -> !torch.int
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.constant.bool'(0x86287b0) {
  %3 = "torch.constant.bool"() {value = true} : () -> !torch.bool
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x8699a70) {
  %4 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<2x1024xf32>} : () -> !torch.vtensor<[2,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x869d9d0) {
  %5 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x869da90) {
  %6 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x869db50) {
  %7 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86a2f90) {
  %8 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86a5a10) {
  %9 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86a6fb0) {
  %10 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86a7070) {
  %11 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86867b0) {
  %12 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x8686870) {
  %13 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86ab2f0) {
  %14 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86add50) {
  %15 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b07d0) {
  %16 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b0890) {
  %17 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b0950) {
  %18 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b0a10) {
  %19 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b0ad0) {
  %20 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b0b90) {
  %21 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b0c50) {
  %22 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b0d10) {
  %23 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b1410) {
  %24 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b14d0) {
  %25 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b2a70) {
  %26 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b2b30) {
  %27 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b2bf0) {
  %28 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b2cb0) {
  %29 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b2d70) {
  %30 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b2e30) {
  %31 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b2ef0) {
  %32 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b2fb0) {
  %33 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3070) {
  %34 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3130) {
  %35 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b31f0) {
  %36 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b32b0) {
  %37 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3370) {
  %38 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3430) {
  %39 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b34f0) {
  %40 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b35b0) {
  %41 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3670) {
  %42 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3730) {
  %43 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b37f0) {
  %44 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b38b0) {
  %45 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3970) {
  %46 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3a30) {
  %47 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4730) {
  %48 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b47f0) {
  %49 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b48b0) {
  %50 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4970) {
  %51 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4a30) {
  %52 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4af0) {
  %53 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4bb0) {
  %54 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4c70) {
  %55 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4d30) {
  %56 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4df0) {
  %57 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4eb0) {
  %58 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4f70) {
  %59 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5030) {
  %60 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b50f0) {
  %61 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b51b0) {
  %62 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5270) {
  %63 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5330) {
  %64 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b53f0) {
  %65 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b54b0) {
  %66 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5570) {
  %67 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5630) {
  %68 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b56f0) {
  %69 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b57b0) {
  %70 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5870) {
  %71 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5930) {
  %72 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b59f0) {
  %73 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5ab0) {
  %74 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5b70) {
  %75 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5c30) {
  %76 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5cf0) {
  %77 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5db0) {
  %78 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5e70) {
  %79 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5f30) {
  %80 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b5ff0) {
  %81 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b60b0) {
  %82 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b6170) {
  %83 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b6230) {
  %84 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b62f0) {
  %85 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b63b0) {
  %86 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b6470) {
  %87 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b6530) {
  %88 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b65f0) {
  %89 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b66b0) {
  %90 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b6770) {
  %91 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b6830) {
  %92 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b68f0) {
  %93 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b69b0) {
  %94 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b6a70) {
  %95 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3af0) {
  %96 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3bb0) {
  %97 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3c70) {
  %98 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3d30) {
  %99 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3df0) {
  %100 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3eb0) {
  %101 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b3f70) {
  %102 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4030) {
  %103 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4110) {
  %104 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b41d0) {
  %105 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4290) {
  %106 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4350) {
  %107 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4410) {
  %108 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b44d0) {
  %109 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4590) {
  %110 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b4650) {
  %111 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8350) {
  %112 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b83f0) {
  %113 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b84b0) {
  %114 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8570) {
  %115 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8630) {
  %116 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b86f0) {
  %117 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b87b0) {
  %118 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8870) {
  %119 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8930) {
  %120 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b89f0) {
  %121 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8ab0) {
  %122 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8b70) {
  %123 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8c30) {
  %124 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8cf0) {
  %125 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8db0) {
  %126 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8e70) {
  %127 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8f30) {
  %128 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b8ff0) {
  %129 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b90b0) {
  %130 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b9170) {
  %131 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b9230) {
  %132 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b92f0) {
  %133 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
Legalizing operation : 'torch.vtensor.literal'(0x86b93b0) {
  %134 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32>
} -> SUCCESS : operation marked legal by the target
//===-------------------------------------------===//
//===-------------------------------------------===//
| Legalizing operation : 'torch.vtensor.literal'(0x86b9470) { | |
| %135 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9530) { | |
| %136 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b95f0) { | |
| %137 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b96b0) { | |
| %138 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9770) { | |
| %139 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9830) { | |
| %140 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b98f0) { | |
| %141 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b99b0) { | |
| %142 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9a70) { | |
| %143 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9b30) { | |
| %144 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9bf0) { | |
| %145 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9cb0) { | |
| %146 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9d70) { | |
| %147 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9e30) { | |
| %148 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9ef0) { | |
| %149 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b9fb0) { | |
| %150 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba070) { | |
| %151 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba130) { | |
| %152 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba1f0) { | |
| %153 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba2b0) { | |
| %154 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba370) { | |
| %155 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba430) { | |
| %156 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba4f0) { | |
| %157 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba5b0) { | |
| %158 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba670) { | |
| %159 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba730) { | |
| %160 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba7f0) { | |
| %161 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba8b0) { | |
| %162 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86ba970) { | |
| %163 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86baa30) { | |
| %164 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86baaf0) { | |
| %165 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86babb0) { | |
| %166 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bac70) { | |
| %167 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bad30) { | |
| %168 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86badf0) { | |
| %169 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86baeb0) { | |
| %170 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86baf70) { | |
| %171 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb030) { | |
| %172 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb0f0) { | |
| %173 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb1b0) { | |
| %174 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb270) { | |
| %175 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb330) { | |
| %176 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb3f0) { | |
| %177 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb4b0) { | |
| %178 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb570) { | |
| %179 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb630) { | |
| %180 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb6f0) { | |
| %181 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb7b0) { | |
| %182 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb870) { | |
| %183 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb930) { | |
| %184 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bb9f0) { | |
| %185 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bbab0) { | |
| %186 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bbb70) { | |
| %187 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bbc30) { | |
| %188 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bbcf0) { | |
| %189 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bbdb0) { | |
| %190 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bbe70) { | |
| %191 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b6b30) { | |
| %192 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b6bf0) { | |
| %193 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b6cb0) { | |
| %194 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b6d70) { | |
| %195 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b6e30) { | |
| %196 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b6ef0) { | |
| %197 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b6fb0) { | |
| %198 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7070) { | |
| %199 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7130) { | |
| %200 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b71f0) { | |
| %201 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b72b0) { | |
| %202 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7370) { | |
| %203 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7430) { | |
| %204 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b74f0) { | |
| %205 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b75b0) { | |
| %206 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7670) { | |
| %207 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7730) { | |
| %208 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b77f0) { | |
| %209 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b78b0) { | |
| %210 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7970) { | |
| %211 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7a30) { | |
| %212 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7af0) { | |
| %213 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7bb0) { | |
| %214 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7c70) { | |
| %215 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7d30) { | |
| %216 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7df0) { | |
| %217 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7eb0) { | |
| %218 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b7f70) { | |
| %219 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b8030) { | |
| %220 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b80f0) { | |
| %221 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b81b0) { | |
| %222 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86b8270) { | |
| %223 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bef50) { | |
| %224 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86beff0) { | |
| %225 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf0b0) { | |
| %226 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf170) { | |
| %227 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf230) { | |
| %228 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf2f0) { | |
| %229 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf3b0) { | |
| %230 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf470) { | |
| %231 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf530) { | |
| %232 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf5f0) { | |
| %233 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf6b0) { | |
| %234 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf770) { | |
| %235 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf830) { | |
| %236 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf8f0) { | |
| %237 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bf9b0) { | |
| %238 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bfa70) { | |
| %239 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bfb30) { | |
| %240 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bfbf0) { | |
| %241 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bfcb0) { | |
| %242 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bfd70) { | |
| %243 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bfe30) { | |
| %244 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bfef0) { | |
| %245 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86bffb0) { | |
| %246 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0070) { | |
| %247 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0130) { | |
| %248 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c01f0) { | |
| %249 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c02b0) { | |
| %250 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0370) { | |
| %251 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0430) { | |
| %252 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c04f0) { | |
| %253 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c05b0) { | |
| %254 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0670) { | |
| %255 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0730) { | |
| %256 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c07f0) { | |
| %257 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c08b0) { | |
| %258 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0970) { | |
| %259 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0a30) { | |
| %260 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0af0) { | |
| %261 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0bb0) { | |
| %262 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0c70) { | |
| %263 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0d30) { | |
| %264 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0df0) { | |
| %265 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0eb0) { | |
| %266 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c0f70) { | |
| %267 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1030) { | |
| %268 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c10f0) { | |
| %269 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c11b0) { | |
| %270 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1270) { | |
| %271 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1330) { | |
| %272 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c13f0) { | |
| %273 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c14b0) { | |
| %274 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1570) { | |
| %275 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1630) { | |
| %276 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c16f0) { | |
| %277 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c17b0) { | |
| %278 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1870) { | |
| %279 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1930) { | |
| %280 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c19f0) { | |
| %281 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1ab0) { | |
| %282 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1b70) { | |
| %283 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1c30) { | |
| %284 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x4096xf32>} : () -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1cf0) { | |
| %285 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096xf32>} : () -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1db0) { | |
| %286 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<4096x1024xf32>} : () -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1e70) { | |
| %287 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1f30) { | |
| %288 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c1ff0) { | |
| %289 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c20b0) { | |
| %290 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024x1024xf32>} : () -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c36d0) { | |
| %291 = "torch.vtensor.literal"() {value = dense<-3.40282347E+38> : tensor<f32>} : () -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c3770) { | |
| %292 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072xf32>} : () -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c3830) { | |
| %293 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<3072x1024xf32>} : () -> !torch.vtensor<[3072,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c38f0) { | |
| %294 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c39b0) { | |
| %295 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c4f70) { | |
| %296 = "torch.vtensor.literal"() {value = dense<0.707106769> : tensor<f32>} : () -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c5010) { | |
| %297 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c50d0) { | |
| %298 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<1024xf32>} : () -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.vtensor.literal'(0x86c6670) { | |
| %299 = "torch.vtensor.literal"() {value = dense_resource<__elided__> : tensor<250880x1024xf32>} : () -> !torch.vtensor<[250880,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.none'(0x86c6730) { | |
| %300 = "torch.constant.none"() : () -> !torch.none | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86c7cf0) { | |
| %301 = "torch.constant.float"() {value = -3.4028234663852886E+38 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.bool'(0x86c7e70) { | |
| %302 = "torch.constant.bool"() {value = false} : () -> !torch.bool | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86c7f10) { | |
| %303 = "torch.constant.int"() {value = -1 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86c94d0) { | |
| %304 = "torch.constant.float"() {value = 1.000000e-05 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86cbf40) { | |
| %305 = "torch.constant.int"() {value = 0 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86cd4c0) { | |
| %306 = "torch.constant.int"() {value = 2 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86d00d0) { | |
| %307 = "torch.constant.int"() {value = 17 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86d16f0) { | |
| %308 = "torch.constant.int"() {value = 9223372036854775807 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86d2cb0) { | |
| %309 = "torch.constant.int"() {value = 16 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86d3560) { | |
| %310 = "torch.constant.int"() {value = 3 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86d5bb0) { | |
| %311 = "torch.constant.float"() {value = 1.000000e+00 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86d5c90) { | |
| %312 = "torch.constant.int"() {value = 3072 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86d5d30) { | |
| %313 = "torch.constant.int"() {value = 192 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86d72b0) { | |
| %314 = "torch.constant.int"() {value = 64 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86da060) { | |
| %315 = "torch.constant.float"() {value = 1.250000e-01 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86da140) { | |
| %316 = "torch.constant.int"() {value = 4096 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86da200) { | |
| %317 = "torch.constant.float"() {value = 5.000000e-01 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86da2e0) { | |
| %318 = "torch.constant.float"() {value = 0.79788455999999996 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86db8a0) { | |
| %319 = "torch.constant.float"() {value = 4.471500e-02 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86db980) { | |
| %320 = "torch.constant.float"() {value = 6.250000e-02 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86dba60) { | |
| %321 = "torch.constant.float"() {value = 0.041666666666666664 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86dd020) { | |
| %322 = "torch.constant.float"() {value = 0.33333333333333331 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86dd100) { | |
| %323 = "torch.constant.float"() {value = 3.125000e-02 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86de6c0) { | |
| %324 = "torch.constant.float"() {value = 2.500000e-01 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86de7a0) { | |
| %325 = "torch.constant.int"() {value = 4 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86de860) { | |
| %326 = "torch.constant.float"() {value = 2.500000e-02 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86de940) { | |
| %327 = "torch.constant.float"() {value = 2.000000e-01 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86dea20) { | |
| %328 = "torch.constant.int"() {value = 5 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86deae0) { | |
| %329 = "torch.constant.float"() {value = 0.020833333333333332 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86debc0) { | |
| %330 = "torch.constant.float"() {value = 0.16666666666666666 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86df2b0) { | |
| %331 = "torch.constant.int"() {value = 6 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86df370) { | |
| %332 = "torch.constant.float"() {value = 0.017857142857142856 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86df450) { | |
| %333 = "torch.constant.float"() {value = 0.14285714285714285 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86dfd40) { | |
| %334 = "torch.constant.int"() {value = 7 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86dfe00) { | |
| %335 = "torch.constant.float"() {value = 1.562500e-02 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86dfee0) { | |
| %336 = "torch.constant.int"() {value = 8 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86dffa0) { | |
| %337 = "torch.constant.float"() {value = 0.013888888888888888 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e0080) { | |
| %338 = "torch.constant.float"() {value = 0.1111111111111111 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e0160) { | |
| %339 = "torch.constant.int"() {value = 9 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e0220) { | |
| %340 = "torch.constant.float"() {value = 1.250000e-02 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e0300) { | |
| %341 = "torch.constant.float"() {value = 1.000000e-01 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e03e0) { | |
| %342 = "torch.constant.int"() {value = 10 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e04a0) { | |
| %343 = "torch.constant.float"() {value = 0.011363636363636364 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e0580) { | |
| %344 = "torch.constant.float"() {value = 0.090909090909090911 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e0660) { | |
| %345 = "torch.constant.int"() {value = 11 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e17f0) { | |
| %346 = "torch.constant.float"() {value = 0.010416666666666666 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e18d0) { | |
| %347 = "torch.constant.float"() {value = 0.083333333333333329 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e19b0) { | |
| %348 = "torch.constant.int"() {value = 12 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e2280) { | |
| %349 = "torch.constant.float"() {value = 0.0096153846153846159 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e2360) { | |
| %350 = "torch.constant.float"() {value = 0.076923076923076927 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e2440) { | |
| %351 = "torch.constant.int"() {value = 13 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e2500) { | |
| %352 = "torch.constant.float"() {value = 0.0089285714285714281 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e25e0) { | |
| %353 = "torch.constant.float"() {value = 0.071428571428571425 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e26c0) { | |
| %354 = "torch.constant.int"() {value = 14 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e2780) { | |
| %355 = "torch.constant.float"() {value = 0.0083333333333333332 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e3070) { | |
| %356 = "torch.constant.float"() {value = 0.066666666666666666 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e3150) { | |
| %357 = "torch.constant.int"() {value = 15 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e3210) { | |
| %358 = "torch.constant.float"() {value = 7.812500e-03 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e32f0) { | |
| %359 = "torch.constant.float"() {value = 0.0073529411764705881 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e33d0) { | |
| %360 = "torch.constant.float"() {value = 0.058823529411764705 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e34b0) { | |
| %361 = "torch.constant.float"() {value = 0.0069444444444444441 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e3590) { | |
| %362 = "torch.constant.float"() {value = 0.055555555555555552 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86deca0) { | |
| %363 = "torch.constant.int"() {value = 18 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86ded60) { | |
| %364 = "torch.constant.float"() {value = 0.0065789473684210523 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86dee40) { | |
| %365 = "torch.constant.float"() {value = 0.052631578947368418 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86def20) { | |
| %366 = "torch.constant.int"() {value = 19 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86defe0) { | |
| %367 = "torch.constant.float"() {value = 6.250000e-03 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86df0c0) { | |
| %368 = "torch.constant.float"() {value = 5.000000e-02 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86df1a0) { | |
| %369 = "torch.constant.int"() {value = 20 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e4240) { | |
| %370 = "torch.constant.float"() {value = 0.0059523809523809521 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e4310) { | |
| %371 = "torch.constant.float"() {value = 0.047619047619047616 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e43f0) { | |
| %372 = "torch.constant.int"() {value = 21 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e44b0) { | |
| %373 = "torch.constant.float"() {value = 0.005681818181818182 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e4590) { | |
| %374 = "torch.constant.float"() {value = 0.045454545454545456 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e4670) { | |
| %375 = "torch.constant.int"() {value = 22 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e4730) { | |
| %376 = "torch.constant.float"() {value = 0.005434782608695652 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.float'(0x86e4810) { | |
| %377 = "torch.constant.float"() {value = 0.043478260869565216 : f64} : () -> !torch.float | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.int'(0x86e48f0) { | |
| %378 = "torch.constant.int"() {value = 23 : i64} : () -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.constant.device'(0x86e4990) { | |
| %379 = "torch.constant.device"() {value = "cpu"} : () -> !torch.Device | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86e6bf0) { | |
| %380 = "torch.prim.ListConstruct"(%303, %1) : (!torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86e6d00) { | |
| %381 = "torch.aten.view"(%arg0, %380) : (!torch.vtensor<[?,?],si64>, !torch.list<int>) -> !torch.vtensor<[?,128],si64> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.embedding'(0x85d6710) { | |
| %382 = "torch.aten.embedding"(%299, %381, %303, %302, %302) : (!torch.vtensor<[250880,1024],f32>, !torch.vtensor<[?,128],si64>, !torch.int, !torch.bool, !torch.bool) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86e8490) { | |
| %383 = "torch.prim.ListConstruct"(%306) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86e6e10) { | |
| %384 = "torch.aten.sum.dim_IntList"(%382, %383, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86bbf90) { | |
| %385 = "torch.aten.div.Scalar"(%384, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86bc0a0) { | |
| %386 = "torch.aten.size.int"(%382, %305) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86c7d50) { | |
| %387 = "torch.prim.ListConstruct"(%386, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86bd6f0) { | |
| %388 = "torch.aten.broadcast_to"(%385, %387) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x86bd800) { | |
| %389 = "torch.aten.sub.Tensor"(%382, %388, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86be140) { | |
| %390 = "torch.aten.mul.Tensor"(%389, %389) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86be250) { | |
| %391 = "torch.aten.sum.dim_IntList"(%390, %383, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86be3a0) { | |
| %392 = "torch.aten.div.Scalar"(%391, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86be4b0) { | |
| %393 = "torch.aten.add.Scalar"(%392, %304, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x86be5e0) { | |
| %394 = "torch.aten.rsqrt"(%393) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86be6d0) { | |
| %395 = "torch.aten.size.int"(%382, %305) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86be7e0) { | |
| %396 = "torch.prim.ListConstruct"(%395, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86be910) { | |
| %397 = "torch.aten.broadcast_to"(%394, %396) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86bea20) { | |
| %398 = "torch.aten.mul.Tensor"(%389, %397) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86beb30) { | |
| %399 = "torch.aten.mul.Tensor"(%398, %298) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86bec40) { | |
| %400 = "torch.aten.add.Tensor"(%399, %297, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86bed70) { | |
| %401 = "torch.prim.ListConstruct"(%2, %1) : (!torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.ones'(0x85d6820) { | |
| %402 = "torch.aten.ones"(%401, %300, %300, %379, %302) : (!torch.list<int>, !torch.none, !torch.none, !torch.Device, !torch.bool) -> !torch.vtensor<[1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x86eff10) { | |
| %403 = "torch.aten.clone"(%296, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.arange.start_step'(0x86f0020) { | |
| %404 = "torch.aten.arange.start_step"(%2, %307, %2, %310, %300, %379, %302) : (!torch.int, !torch.int, !torch.int, !torch.int, !torch.none, !torch.Device, !torch.bool) -> !torch.vtensor<[?],si32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.pow.Tensor_Tensor'(0x86f01d0) { | |
| %405 = "torch.aten.pow.Tensor_Tensor"(%403, %404) : (!torch.vtensor<[],f32>, !torch.vtensor<[?],si32>) -> !torch.vtensor<[?],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.cumsum'(0x86f02e0) { | |
| %406 = "torch.aten.cumsum"(%402, %303, %300) : (!torch.vtensor<[1,128],f32>, !torch.int, !torch.none) -> !torch.vtensor<[1,128],f32> | |
| * Fold { | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::DialectFoldInterface) | |
| } -> FAILURE : unable to fold | |
| * Pattern : 'torch.aten.cumsum -> ()' { | |
| Trying to match "(anonymous namespace)::ConvertAtenCumsumOp" | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::InferIntRangeInterface::Trait<Empty>) | |
| ** Insert : 'arith.constant'(0x879a870) | |
| ** Insert : 'arith.constant'(0x879a9c0) | |
| ** Insert : 'arith.constant'(0x87e1780) | |
| ** Insert : 'arith.constant'(0x87e0ff0) | |
| ** Insert : 'linalg.init_tensor'(0x8857dc0) | |
| ** Insert : 'arith.constant'(0x87e10e0) | |
| ** Insert : 'linalg.fill'(0x885c940) | |
| ** Insert : 'tensor.cast'(0x885ca70) | |
| ** Insert : 'linalg.init_tensor'(0x885dfe0) | |
| ** Insert : 'arith.constant'(0x87e11f0) | |
| ** Insert : 'linalg.fill'(0x885e070) | |
| ** Insert : 'tensor.cast'(0x885e130) | |
| ** Insert : 'tm_tensor.scan'(0x8774a40) | |
| ** Insert : 'tensor.cast'(0x885f750) | |
| ** Replace : 'torch.aten.cumsum'(0x86f02e0) | |
| "(anonymous namespace)::ConvertAtenCumsumOp" result 1 | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'arith.constant'(0x879a870) { | |
| %408 = "arith.constant"() {value = 0 : index} : () -> index | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'arith.constant'(0x879a9c0) { | |
| %409 = "arith.constant"() {value = 1 : index} : () -> index | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'arith.constant'(0x87e1780) { | |
| %410 = "arith.constant"() {value = 1 : index} : () -> index | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'arith.constant'(0x87e0ff0) { | |
| %411 = "arith.constant"() {value = 128 : index} : () -> index | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'linalg.init_tensor'(0x8857dc0) { | |
| %412 = "linalg.init_tensor"(%409, %411) {static_sizes = [-1, -1]} : (index, index) -> tensor<?x?xf32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'arith.constant'(0x87e10e0) { | |
| %413 = "arith.constant"() {value = 0.000000e+00 : f32} : () -> f32 | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'linalg.fill'(0x885c940) { | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'tensor.cast'(0x885ca70) { | |
| %415 = "tensor.cast"(%414) : (tensor<?x?xf32>) -> tensor<1x128xf32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'linalg.init_tensor'(0x885dfe0) { | |
| %416 = "linalg.init_tensor"(%409) {static_sizes = [-1]} : (index) -> tensor<?xf32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'arith.constant'(0x87e11f0) { | |
| %417 = "arith.constant"() {value = 0.000000e+00 : f32} : () -> f32 | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'linalg.fill'(0x885e070) { | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'tensor.cast'(0x885e130) { | |
| %419 = "tensor.cast"(%418) : (tensor<?xf32>) -> tensor<1xf32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'tm_tensor.scan'(0x8774a40) { | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'tensor.cast'(0x885f750) { | |
| %421 = "tensor.cast"(%420#0) : (tensor<1x128xf32>) -> tensor<1x128xf32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| } -> SUCCESS : pattern applied successfully | |
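aten.cumsum is the first op in this trace that is not simply marked legal: the fold attempt fails, and the ConvertAtenCumsumOp pattern rewrites it into a tm_tensor.scan, an inclusive prefix scan along the last (size-128) dimension (%303 = -1). The pattern materializes two zero-filled accumulators via linalg.init_tensor + linalg.fill (the [1,128] output and the [1] running carry) and wraps the dynamically-shaped results in tensor.casts to recover the static 1x128 type. Semantically this is just an inclusive running sum; a sketch of the same computation written the way the scan iterates:

    import torch

    mask = torch.ones(1, 128)                    # the aten.ones result %402
    positions = torch.cumsum(mask, dim=-1)       # the op being rewritten: 1, 2, ..., 128

    acc = torch.zeros(1)                         # cf. the zero-valued linalg.fill carry
    out = torch.empty_like(mask)
    for j in range(mask.shape[-1]):
        acc = acc + mask[:, j]                   # scan body: add, carry the running sum
        out[:, j] = acc
    print(torch.equal(out, positions))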
| // *** IR Dump After Pattern Application *** | |
| mlir-asm-printer: Verifying operation: func.func | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::VariadicResults<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::SingleBlockImplicitTerminator<mlir::linalg::YieldOp>::Impl<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::AttrSizedOperandSegments<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::DestinationStyleOpInterface::Trait<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::LinalgOp::Trait<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::RegionBranchOpInterface::Trait<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::ReifyRankedShapedTypeOpInterface::Trait<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::linalg::FillOpInterface::Trait<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::OpTrait::SingleBlockImplicitTerminator<mlir::torch::TMTensor::YieldOp>::Impl<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::TMTensor::TMTensorOp::Trait<Empty>) | |
| ImplicitTypeIDRegistry::lookupOrInsert(mlir::torch::TMTensor::ScalarLoopOpInterface::Trait<Empty>) | |
| func.func @forward(%arg0: !torch.vtensor<[?,?],si64>) -> !torch.vtensor<[?,2],f32> { | |
| %int1024 = torch.constant.int 1024 | |
| %int128 = torch.constant.int 128 | |
| %int1 = torch.constant.int 1 | |
| %true = torch.constant.bool true | |
| %0 = torch.vtensor.literal(dense_resource<__elided__> : tensor<2x1024xf32>) : !torch.vtensor<[2,1024],f32> | |
| %1 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %2 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %3 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %4 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %5 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %6 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %7 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %8 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %9 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %10 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %11 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %12 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %13 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %14 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %15 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %16 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %17 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %18 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %19 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %20 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %21 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %22 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %23 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %24 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %25 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %26 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %27 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %28 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %29 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %30 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %31 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %32 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %33 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %34 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %35 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %36 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %37 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %38 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %39 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %40 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %41 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %42 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %43 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %44 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %45 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %46 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %47 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %48 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %49 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %50 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %51 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %52 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %53 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %54 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %55 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %56 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %57 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %58 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %59 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %60 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %61 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %62 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %63 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %64 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %65 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %66 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %67 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %68 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %69 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %70 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %71 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %72 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %73 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %74 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %75 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %76 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %77 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %78 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %79 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %80 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %81 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %82 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %83 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %84 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %85 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %86 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %87 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %88 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %89 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %90 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %91 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %92 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %93 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %94 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %95 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %96 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %97 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %98 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %99 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %100 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %101 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %102 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %103 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %104 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %105 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %106 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %107 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %108 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %109 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %110 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %111 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %112 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %113 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %114 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %115 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %116 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %117 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %118 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %119 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %120 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %121 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %122 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %123 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %124 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %125 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %126 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %127 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %128 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %129 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %130 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %131 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %132 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %133 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %134 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %135 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %136 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %137 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %138 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %139 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %140 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %141 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %142 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %143 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %144 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %145 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %146 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %147 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %148 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %149 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %150 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %151 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %152 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %153 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %154 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %155 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %156 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %157 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %158 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %159 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %160 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %161 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %162 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %163 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %164 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %165 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %166 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %167 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %168 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %169 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %170 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %171 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %172 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %173 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %174 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %175 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %176 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %177 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %178 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %179 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %180 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %181 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %182 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %183 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %184 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %185 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %186 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %187 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %188 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %189 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %190 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %191 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %192 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %193 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %194 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %195 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %196 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %197 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %198 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %199 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %200 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %201 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %202 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %203 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %204 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %205 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %206 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %207 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %208 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %209 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %210 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %211 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %212 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %213 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %214 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %215 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %216 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %217 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %218 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %219 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %220 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %221 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %222 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %223 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %224 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %225 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %226 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %227 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %228 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %229 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %230 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %231 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %232 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %233 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %234 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %235 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %236 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %237 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %238 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %239 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %240 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %241 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %242 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %243 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %244 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %245 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %246 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %247 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %248 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %249 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %250 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %251 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %252 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %253 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %254 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %255 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %256 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %257 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %258 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %259 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %260 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %261 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %262 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %263 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %264 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %265 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %266 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %267 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %268 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %269 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %270 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %271 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %272 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %273 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %274 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
| %275 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %276 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %277 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %278 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %279 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %280 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x4096xf32>) : !torch.vtensor<[1024,4096],f32> | |
| %281 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096xf32>) : !torch.vtensor<[4096],f32> | |
| %282 = torch.vtensor.literal(dense_resource<__elided__> : tensor<4096x1024xf32>) : !torch.vtensor<[4096,1024],f32> | |
| %283 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %284 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %285 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %286 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024x1024xf32>) : !torch.vtensor<[1024,1024],f32> | |
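| // Annotation: the elided dense_resource literals above appear to be the per-layer weights | |
| // of a BLOOM-style transformer stack (hidden size 1024): fused QKV 3072x1024 with 3072 bias, | |
| // attention output 1024x1024, MLP 1024x4096 / 4096x1024, and 1024-element LayerNorm vectors. | |
| // %287 below is -3.40282347E+38 (float32's lowest value), used later as the mask fill value. | |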
| %287 = torch.vtensor.literal(dense<-3.40282347E+38> : tensor<f32>) : !torch.vtensor<[],f32> | |
| %288 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072xf32>) : !torch.vtensor<[3072],f32> | |
| %289 = torch.vtensor.literal(dense_resource<__elided__> : tensor<3072x1024xf32>) : !torch.vtensor<[3072,1024],f32> | |
| %290 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %291 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %292 = torch.vtensor.literal(dense<0.707106769> : tensor<f32>) : !torch.vtensor<[],f32> | |
| %293 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %294 = torch.vtensor.literal(dense_resource<__elided__> : tensor<1024xf32>) : !torch.vtensor<[1024],f32> | |
| %295 = torch.vtensor.literal(dense_resource<__elided__> : tensor<250880x1024xf32>) : !torch.vtensor<[250880,1024],f32> | |
| %none = torch.constant.none | |
| %float-3.402820e38 = torch.constant.float -3.4028234663852886E+38 | |
| %false = torch.constant.bool false | |
| %int-1 = torch.constant.int -1 | |
| %296 = builtin.unrealized_conversion_cast %int-1 : !torch.int to i64 | |
| %float1.000000e-05 = torch.constant.float 1.000000e-05 | |
| %int0 = torch.constant.int 0 | |
| %int2 = torch.constant.int 2 | |
| %int17 = torch.constant.int 17 | |
| %int9223372036854775807 = torch.constant.int 9223372036854775807 | |
| %int16 = torch.constant.int 16 | |
| %int3 = torch.constant.int 3 | |
| %float1.000000e00 = torch.constant.float 1.000000e+00 | |
| %int3072 = torch.constant.int 3072 | |
| %int192 = torch.constant.int 192 | |
| %int64 = torch.constant.int 64 | |
| %float1.250000e-01 = torch.constant.float 1.250000e-01 | |
| %int4096 = torch.constant.int 4096 | |
| %float5.000000e-01 = torch.constant.float 5.000000e-01 | |
| %float7.978850e-01 = torch.constant.float 0.79788455999999996 | |
| %float4.471500e-02 = torch.constant.float 4.471500e-02 | |
| %float6.250000e-02 = torch.constant.float 6.250000e-02 | |
| %float4.166670e-02 = torch.constant.float 0.041666666666666664 | |
| %float3.333330e-01 = torch.constant.float 0.33333333333333331 | |
| %float3.125000e-02 = torch.constant.float 3.125000e-02 | |
| %float2.500000e-01 = torch.constant.float 2.500000e-01 | |
| %int4 = torch.constant.int 4 | |
| %float2.500000e-02 = torch.constant.float 2.500000e-02 | |
| %float2.000000e-01 = torch.constant.float 2.000000e-01 | |
| %int5 = torch.constant.int 5 | |
| %float2.083330e-02 = torch.constant.float 0.020833333333333332 | |
| %float1.666670e-01 = torch.constant.float 0.16666666666666666 | |
| %int6 = torch.constant.int 6 | |
| %float1.785710e-02 = torch.constant.float 0.017857142857142856 | |
| %float1.428570e-01 = torch.constant.float 0.14285714285714285 | |
| %int7 = torch.constant.int 7 | |
| %float1.562500e-02 = torch.constant.float 1.562500e-02 | |
| %int8 = torch.constant.int 8 | |
| %float1.388890e-02 = torch.constant.float 0.013888888888888888 | |
| %float1.111110e-01 = torch.constant.float 0.1111111111111111 | |
| %int9 = torch.constant.int 9 | |
| %float1.250000e-02 = torch.constant.float 1.250000e-02 | |
| %float1.000000e-01 = torch.constant.float 1.000000e-01 | |
| %int10 = torch.constant.int 10 | |
| %float1.136360e-02 = torch.constant.float 0.011363636363636364 | |
| %float9.090900e-02 = torch.constant.float 0.090909090909090911 | |
| %int11 = torch.constant.int 11 | |
| %float1.041670e-02 = torch.constant.float 0.010416666666666666 | |
| %float8.333330e-02 = torch.constant.float 0.083333333333333329 | |
| %int12 = torch.constant.int 12 | |
| %float9.615380e-03 = torch.constant.float 0.0096153846153846159 | |
| %float7.692310e-02 = torch.constant.float 0.076923076923076927 | |
| %int13 = torch.constant.int 13 | |
| %float8.928570e-03 = torch.constant.float 0.0089285714285714281 | |
| %float7.142860e-02 = torch.constant.float 0.071428571428571425 | |
| %int14 = torch.constant.int 14 | |
| %float8.333330e-03 = torch.constant.float 0.0083333333333333332 | |
| %float6.666660e-02 = torch.constant.float 0.066666666666666666 | |
| %int15 = torch.constant.int 15 | |
| %float7.812500e-03 = torch.constant.float 7.812500e-03 | |
| %float7.352940e-03 = torch.constant.float 0.0073529411764705881 | |
| %float5.882350e-02 = torch.constant.float 0.058823529411764705 | |
| %float6.944440e-03 = torch.constant.float 0.0069444444444444441 | |
| %float5.555560e-02 = torch.constant.float 0.055555555555555552 | |
| %int18 = torch.constant.int 18 | |
| %float6.578940e-03 = torch.constant.float 0.0065789473684210523 | |
| %float5.263160e-02 = torch.constant.float 0.052631578947368418 | |
| %int19 = torch.constant.int 19 | |
| %float6.250000e-03 = torch.constant.float 6.250000e-03 | |
| %float5.000000e-02 = torch.constant.float 5.000000e-02 | |
| %int20 = torch.constant.int 20 | |
| %float5.952380e-03 = torch.constant.float 0.0059523809523809521 | |
| %float4.761900e-02 = torch.constant.float 0.047619047619047616 | |
| %int21 = torch.constant.int 21 | |
| %float5.681820e-03 = torch.constant.float 0.005681818181818182 | |
| %float4.545450e-02 = torch.constant.float 0.045454545454545456 | |
| %int22 = torch.constant.int 22 | |
| %float5.434780e-03 = torch.constant.float 0.005434782608695652 | |
| %float4.347830e-02 = torch.constant.float 0.043478260869565216 | |
| %int23 = torch.constant.int 23 | |
| %cpu = torch.constant.device "cpu" | |
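| // Annotation: end of the scalar constant pool. 0.125 above (%float1.250000e-01) matches the | |
| // attention scale 1/sqrt(64); the runs of 2^-k and 1/n fractions appear to be traced | |
| // ALiBi-slope arithmetic. The lowered forward-pass computation begins at %297. | |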
| %297 = torch.prim.ListConstruct %int-1, %int128 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %298 = torch.aten.view %arg0, %297 : !torch.vtensor<[?,?],si64>, !torch.list<int> -> !torch.vtensor<[?,128],si64> | |
| %299 = torch.aten.embedding %295, %298, %int-1, %false, %false : !torch.vtensor<[250880,1024],f32>, !torch.vtensor<[?,128],si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[?,128,1024],f32> | |
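| // Annotation: %298/%299 reshape the input ids to [?,128] and look them up in the | |
| // 250880x1024 word embedding table (%295). The embedding LayerNorm follows, decomposed into | |
| // primitives: mean, centered variance, rsqrt(var + 1e-5), then affine scale %294 and bias %293. | |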
| %300 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %301 = torch.aten.sum.dim_IntList %299, %300, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %302 = torch.aten.div.Scalar %301, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %303 = torch.aten.size.int %299, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %304 = torch.prim.ListConstruct %303, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %305 = torch.aten.broadcast_to %302, %304 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %306 = torch.aten.sub.Tensor %299, %305, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %307 = torch.aten.mul.Tensor %306, %306 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %308 = torch.aten.sum.dim_IntList %307, %300, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %309 = torch.aten.div.Scalar %308, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %310 = torch.aten.add.Scalar %309, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %311 = torch.aten.rsqrt %310 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %312 = torch.aten.size.int %299, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %313 = torch.prim.ListConstruct %312, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %314 = torch.aten.broadcast_to %311, %313 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %315 = torch.aten.mul.Tensor %306, %314 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %316 = torch.aten.mul.Tensor %315, %294 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %317 = torch.aten.add.Tensor %316, %293, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
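| // Annotation: %319-%323 build the ALiBi head slopes. pow(%292, arange(1, 17)) with | |
| // %292 = 0.707106769 (= 2^-0.5) yields the 16 per-head slopes 2^(-0.5*i), the standard | |
| // ALiBi schedule for 16 attention heads. | |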
| %318 = torch.prim.ListConstruct %int1, %int128 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %319 = torch.aten.ones %318, %none, %none, %cpu, %false : !torch.list<int>, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[1,128],f32> | |
| %320 = builtin.unrealized_conversion_cast %319 : !torch.vtensor<[1,128],f32> to tensor<1x128xf32> | |
| %321 = torch.aten.clone %292, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %322 = torch.aten.arange.start_step %int1, %int17, %int1, %int3, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[?],si32> | |
| %323 = torch.aten.pow.Tensor_Tensor %321, %322 : !torch.vtensor<[],f32>, !torch.vtensor<[?],si32> -> !torch.vtensor<[?],f32> | |
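| // Annotation: %324-%331 show torch.aten.cumsum lowered through TMTensor: an | |
| // init_tensor/fill pair feeding tm_tensor.scan, an inclusive prefix sum (arith.addf) | |
| // along dimension 1. | |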
| %c0 = arith.constant 0 : index | |
| %c1 = arith.constant 1 : index | |
| %c1_0 = arith.constant 1 : index | |
| %c128 = arith.constant 128 : index | |
| %324 = linalg.init_tensor [%c1, %c128] : tensor<?x?xf32> | |
| %cst = arith.constant 0.000000e+00 : f32 | |
| %325 = linalg.fill ins(%cst : f32) outs(%324 : tensor<?x?xf32>) -> tensor<?x?xf32> | |
| %326 = tensor.cast %325 : tensor<?x?xf32> to tensor<1x128xf32> | |
| %327 = linalg.init_tensor [%c1] : tensor<?xf32> | |
| %cst_1 = arith.constant 0.000000e+00 : f32 | |
| %328 = linalg.fill ins(%cst_1 : f32) outs(%327 : tensor<?xf32>) -> tensor<?xf32> | |
| %329 = tensor.cast %328 : tensor<?xf32> to tensor<1xf32> | |
| %330:2 = tm_tensor.scan dimension(1) inclusive(true) ins(%320 : tensor<1x128xf32>) outs(%326, %329 : tensor<1x128xf32>, tensor<1xf32>) { | |
| ^bb0(%arg1: f32, %arg2: f32): | |
| %2961 = arith.addf %arg1, %arg2 : f32 | |
| tm_tensor.yield %2961 : f32 | |
| } -> tensor<1x128xf32>, tensor<1xf32> | |
| %331 = tensor.cast %330#0 : tensor<1x128xf32> to tensor<1x128xf32> | |
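| // Annotation: %332-%346 form the ALiBi position term: (cumsum(mask) - 1) * mask gives | |
| // 0-based positions over non-padded tokens; multiplying by the per-head slopes (%340) and | |
| // reshaping yields the [16,1,128] additive bias applied to the attention scores below. | |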
| %332 = torch.aten.cumsum %319, %int-1, %none : !torch.vtensor<[1,128],f32>, !torch.int, !torch.none -> !torch.vtensor<[1,128],f32> | |
| %333 = torch.aten.slice.Tensor %332, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128],f32> | |
| %334 = torch.aten.unsqueeze %333, %int1 : !torch.vtensor<[1,128],f32>, !torch.int -> !torch.vtensor<[1,1,128],f32> | |
| %335 = torch.aten.slice.Tensor %334, %int2, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,128],f32> | |
| %336 = torch.aten.sub.Scalar %335, %int1, %int1 : !torch.vtensor<[1,1,128],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,1,128],f32> | |
| %337 = torch.aten.slice.Tensor %319, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128],f32> | |
| %338 = torch.aten.unsqueeze %337, %int1 : !torch.vtensor<[1,128],f32>, !torch.int -> !torch.vtensor<[1,1,128],f32> | |
| %339 = torch.aten.mul.Tensor %336, %338 : !torch.vtensor<[1,1,128],f32>, !torch.vtensor<[1,1,128],f32> -> !torch.vtensor<[1,1,128],f32> | |
| %340 = torch.aten.unsqueeze %323, %int-1 : !torch.vtensor<[?],f32>, !torch.int -> !torch.vtensor<[?,1],f32> | |
| %341 = torch.aten.mul.Tensor %340, %339 : !torch.vtensor<[?,1],f32>, !torch.vtensor<[1,1,128],f32> -> !torch.vtensor<[1,?,128],f32> | |
| %342 = torch.aten.slice.Tensor %319, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128],f32> | |
| %343 = torch.aten.unsqueeze %342, %int1 : !torch.vtensor<[1,128],f32>, !torch.int -> !torch.vtensor<[1,1,128],f32> | |
| %344 = torch.aten.mul.Tensor %341, %343 : !torch.vtensor<[1,?,128],f32>, !torch.vtensor<[1,1,128],f32> -> !torch.vtensor<[1,?,128],f32> | |
| %345 = torch.prim.ListConstruct %int16, %int1, %int128 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %346 = torch.aten.view %344, %345 : !torch.vtensor<[1,?,128],f32>, !torch.list<int> -> !torch.vtensor<[16,1,128],f32> | |
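| // Annotation: %348-%361 build the causal mask: a 128x128 buffer filled with -3.40282e+38 | |
| // is zeroed where col < row + 1 (the lower triangle, via the arange/lt comparison), then | |
| // broadcast to [1,1,128,128]. | |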
| %347 = torch.prim.ListConstruct %int128, %int128 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %348 = torch.aten.empty.memory_format %347, %none, %none, %cpu, %false, %none : !torch.list<int>, !torch.none, !torch.none, !torch.Device, !torch.bool, !torch.none -> !torch.vtensor<[128,128],f32> | |
| %349 = torch.valsem.aten.fill.Scalar %348, %float-3.402820e38 : !torch.vtensor<[128,128],f32>, !torch.float -> !torch.vtensor<[128,128],f32> | |
| %350 = torch.aten.arange.start_step %int0, %int128, %int1, %none, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[?],si64> | |
| %351 = torch.aten.add.Scalar %350, %int1, %int1 : !torch.vtensor<[?],si64>, !torch.int, !torch.int -> !torch.vtensor<[?],si64> | |
| %352 = torch.prim.ListConstruct %int128, %int1 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %353 = torch.aten.view %351, %352 : !torch.vtensor<[?],si64>, !torch.list<int> -> !torch.vtensor<[128,1],si64> | |
| %354 = torch.aten.lt.Tensor %350, %353 : !torch.vtensor<[?],si64>, !torch.vtensor<[128,1],si64> -> !torch.vtensor<[128,?],i1> | |
| %355 = torch.aten.masked_fill.Scalar %349, %354, %int0 : !torch.vtensor<[128,128],f32>, !torch.vtensor<[128,?],i1>, !torch.int -> !torch.vtensor<[128,128],f32> | |
| %356 = torch.aten.unsqueeze %355, %int0 : !torch.vtensor<[128,128],f32>, !torch.int -> !torch.vtensor<[1,128,128],f32> | |
| %357 = torch.aten.unsqueeze %356, %int1 : !torch.vtensor<[1,128,128],f32>, !torch.int -> !torch.vtensor<[1,1,128,128],f32> | |
| %358 = torch.aten.slice.Tensor %357, %int2, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,128,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],f32> | |
| %359 = torch.aten.slice.Tensor %358, %int3, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,128,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,128,128],f32> | |
| %360 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %361 = torch.aten.broadcast_to %359, %360 : !torch.vtensor<[1,1,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],f32> | |
| %362 = torch.aten.slice.Tensor %319, %int0, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128],f32> | |
| %363 = torch.aten.unsqueeze %362, %int1 : !torch.vtensor<[1,128],f32>, !torch.int -> !torch.vtensor<[1,1,128],f32> | |
| %364 = torch.aten.unsqueeze %363, %int2 : !torch.vtensor<[1,1,128],f32>, !torch.int -> !torch.vtensor<[1,1,1,128],f32> | |
| %365 = torch.aten.slice.Tensor %364, %int3, %int0, %int9223372036854775807, %int1 : !torch.vtensor<[1,1,1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,1,1,128],f32> | |
| %366 = torch.aten.broadcast_to %365, %360 : !torch.vtensor<[1,1,1,128],f32>, !torch.list<int> -> !torch.vtensor<[1,1,128,128],f32> | |
| %367 = torch.aten.rsub.Scalar %366, %float1.000000e00, %int1 : !torch.vtensor<[1,1,128,128],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,1,128,128],f32> | |
| %368 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %369 = torch.aten.empty.memory_format %368, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %370 = torch.valsem.aten.copy %369, %367, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %371 = torch.aten.masked_fill.Scalar %367, %370, %float-3.402820e38 : !torch.vtensor<[1,1,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.float -> !torch.vtensor<[1,1,128,128],f32> | |
| %372 = torch.aten.add.Tensor %371, %361, %int1 : !torch.vtensor<[1,1,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,1,128,128],f32> | |
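| // Annotation: %362-%372 folded the padding mask in as well: (1 - mask) positions are | |
| // re-filled with -3.40282e+38 and added to the causal mask, giving one additive attention | |
| // bias. %374-%390 is the first block's input LayerNorm, in the same decomposed pattern. | |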
| %373 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %374 = torch.aten.sum.dim_IntList %317, %373, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %375 = torch.aten.div.Scalar %374, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %376 = torch.aten.size.int %317, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %377 = torch.prim.ListConstruct %376, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %378 = torch.aten.broadcast_to %375, %377 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %379 = torch.aten.sub.Tensor %317, %378, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %380 = torch.aten.mul.Tensor %379, %379 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %381 = torch.aten.sum.dim_IntList %380, %373, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %382 = torch.aten.div.Scalar %381, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %383 = torch.aten.add.Scalar %382, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %384 = torch.aten.rsqrt %383 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %385 = torch.aten.size.int %317, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %386 = torch.prim.ListConstruct %385, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %387 = torch.aten.broadcast_to %384, %386 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %388 = torch.aten.mul.Tensor %379, %387 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %389 = torch.aten.mul.Tensor %388, %291 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %390 = torch.aten.add.Tensor %389, %290, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
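| // Annotation: %391-%403 is the fused QKV projection: a [128,1024] x [1024,3072] matmul | |
| // plus bias %288, reshaped to [1,128,16,192] and sliced along the last dim into Q, K, V | |
| // with head_dim 64 each. | |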
| %391 = torch.aten.transpose.int %289, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %392 = torch.prim.ListConstruct %int128, %int1024 : (!torch.int, !torch.int) -> !torch.list<int> | |
| %393 = torch.aten.view %390, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %394 = torch.aten.mm %393, %391 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %395 = torch.aten.mul.Scalar %288, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %396 = torch.aten.add.Tensor %395, %394, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %397 = torch.prim.ListConstruct %int1, %int128, %int3072 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %398 = torch.aten.view %396, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %399 = torch.prim.ListConstruct %int1, %int128, %int16, %int192 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %400 = torch.aten.view %398, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %401 = torch.aten.slice.Tensor %400, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %402 = torch.aten.slice.Tensor %400, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %403 = torch.aten.slice.Tensor %400, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
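| // Annotation: %404-%414 compute the attention scores: Q as [16,128,64] bmm K^T as | |
| // [16,64,128], scaled by 0.125 = 1/sqrt(head_dim 64), plus the ALiBi bias (%413). | |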
| %404 = torch.aten.transpose.int %401, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %405 = torch.prim.ListConstruct %int16, %int128, %int64 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %406 = torch.aten.view %404, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %407 = torch.prim.ListConstruct %int0, %int2, %int3, %int1 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %408 = torch.aten.permute %402, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %409 = torch.prim.ListConstruct %int16, %int64, %int128 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %410 = torch.aten.view %408, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %411 = torch.aten.bmm %406, %410 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %412 = torch.aten.mul.Scalar %411, %float1.250000e-01 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %413 = torch.aten.mul.Scalar %346, %float1.000000e00 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %414 = torch.aten.add.Tensor %412, %413, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
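| // Annotation: %416-%430 is a numerically stable softmax, decomposed: clamp at | |
| // -3.40282e+38 (%420), subtract the row max, exp, normalize by the row sum, then zero out | |
| // masked positions via the bitwise_not/mul pair. | |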
| %415 = torch.prim.ListConstruct %int-1, %int16, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %416 = torch.aten.view %414, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %417 = torch.aten.mul.Scalar %416, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %418 = torch.aten.add.Tensor %417, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %419 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %420 = torch.aten.maximum %418, %419 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values, %indices = torch.aten.max.dim %420, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %421 = torch.aten.sub.Tensor %420, %values, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %422 = torch.aten.exp %421 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %423 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %424 = torch.aten.sum.dim_IntList %422, %423, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %425 = torch.aten.div.Tensor %422, %424 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %426 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %427 = torch.aten.empty.memory_format %426, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %428 = torch.valsem.aten.copy %427, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %429 = torch.aten.bitwise_not %428 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %430 = torch.aten.mul.Tensor %425, %429 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %431 = torch.prim.ListConstruct %int16, %int128, %int128 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %432 = torch.aten.view %430, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
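| // Annotation: %433-%448: probabilities bmm V -> [16,128,64], heads merged back to | |
| // [1,128,1024], then the attention output projection (%286 weights, %285 bias); %449 adds | |
| // the residual from %317. | |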
| %433 = torch.aten.transpose.int %403, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %434 = torch.aten.view %433, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %435 = torch.aten.bmm %432, %434 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %436 = torch.prim.ListConstruct %int1, %int16, %int128, %int64 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %437 = torch.aten.view %435, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %438 = torch.prim.ListConstruct %int0, %int2, %int1, %int3 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %439 = torch.aten.permute %437, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %440 = torch.aten.clone %439, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %441 = torch.prim.ListConstruct %int1, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %442 = torch.aten.view %440, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %443 = torch.aten.transpose.int %286, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %444 = torch.aten.view %442, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %445 = torch.aten.mm %444, %443 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %446 = torch.aten.mul.Scalar %285, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %447 = torch.aten.add.Tensor %446, %445, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %448 = torch.aten.view %447, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %449 = torch.aten.add.Tensor %317, %448, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
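| // Annotation: %451-%467 is the post-attention LayerNorm, again decomposed into | |
| // mean/variance/rsqrt(var + 1e-5) plus the affine parameters %284/%283. | |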
%450 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int>
%451 = torch.aten.sum.dim_IntList %449, %450, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%452 = torch.aten.div.Scalar %451, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%453 = torch.aten.size.int %449, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%454 = torch.prim.ListConstruct %453, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%455 = torch.aten.broadcast_to %452, %454 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%456 = torch.aten.sub.Tensor %449, %455, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%457 = torch.aten.mul.Tensor %456, %456 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%458 = torch.aten.sum.dim_IntList %457, %450, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%459 = torch.aten.div.Scalar %458, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%460 = torch.aten.add.Scalar %459, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32>
%461 = torch.aten.rsqrt %460 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32>
%462 = torch.aten.size.int %449, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%463 = torch.prim.ListConstruct %462, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%464 = torch.aten.broadcast_to %461, %463 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%465 = torch.aten.mul.Tensor %456, %464 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%466 = torch.aten.mul.Tensor %465, %284 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%467 = torch.aten.add.Tensor %466, %283, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
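// Annotation: %450..%467 spell out a LayerNorm over the 1024-wide hidden axis (dim 2):
//   mean = sum(x, dim=2, keepdim=True) / 1024
//   var  = sum((x - mean)^2, dim=2, keepdim=True) / 1024   (biased variance)
//   y    = (x - mean) * rsqrt(var + 1e-5) * %284 + %283    (affine weight and bias)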
%468 = torch.aten.transpose.int %282, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32>
%469 = torch.aten.view %467, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%470 = torch.aten.mm %469, %468 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32>
%471 = torch.aten.mul.Scalar %281, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32>
%472 = torch.aten.add.Tensor %471, %470, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32>
%473 = torch.prim.ListConstruct %int1, %int128, %int4096 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%474 = torch.aten.view %472, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32>
%475 = torch.aten.mul.Scalar %474, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%476 = torch.aten.mul.Scalar %474, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%477 = torch.aten.mul.Scalar %474, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%478 = torch.aten.mul.Tensor %477, %474 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%479 = torch.aten.add.Scalar %478, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32>
%480 = torch.aten.mul.Tensor %476, %479 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%481 = torch.aten.tanh %480 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%482 = torch.aten.add.Scalar %481, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32>
%483 = torch.aten.mul.Tensor %475, %482 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
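// Annotation: %475..%483 are the tanh GELU approximation,
//   gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
// with sqrt(2/pi) emitted as %float7.978850e-01 and the cubic term factored as
// 0.79788 * x * (1 + 0.044715 * x^2).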
%484 = torch.aten.transpose.int %280, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32>
%485 = torch.prim.ListConstruct %int128, %int4096 : (!torch.int, !torch.int) -> !torch.list<int>
%486 = torch.aten.view %483, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32>
%487 = torch.aten.mm %486, %484 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32>
%488 = torch.aten.mul.Scalar %279, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32>
%489 = torch.aten.add.Tensor %488, %487, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32>
%490 = torch.aten.view %489, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
%491 = torch.aten.add.Tensor %449, %490, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
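// Annotation: MLP down-projection (4096 -> 1024) plus the second residual; %491 is the
// hidden state entering the next decoder layer.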
%492 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int>
%493 = torch.aten.sum.dim_IntList %491, %492, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%494 = torch.aten.div.Scalar %493, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%495 = torch.aten.size.int %491, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%496 = torch.prim.ListConstruct %495, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%497 = torch.aten.broadcast_to %494, %496 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%498 = torch.aten.sub.Tensor %491, %497, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%499 = torch.aten.mul.Tensor %498, %498 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%500 = torch.aten.sum.dim_IntList %499, %492, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%501 = torch.aten.div.Scalar %500, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%502 = torch.aten.add.Scalar %501, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32>
%503 = torch.aten.rsqrt %502 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32>
%504 = torch.aten.size.int %491, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%505 = torch.prim.ListConstruct %504, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%506 = torch.aten.broadcast_to %503, %505 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%507 = torch.aten.mul.Tensor %498, %506 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%508 = torch.aten.mul.Tensor %507, %278 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%509 = torch.aten.add.Tensor %508, %277, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%510 = torch.aten.transpose.int %276, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32>
%511 = torch.aten.view %509, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%512 = torch.aten.mm %511, %510 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32>
%513 = torch.aten.mul.Scalar %275, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32>
%514 = torch.aten.add.Tensor %513, %512, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32>
%515 = torch.aten.view %514, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32>
%516 = torch.aten.view %515, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32>
%517 = torch.aten.slice.Tensor %516, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%518 = torch.aten.slice.Tensor %516, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%519 = torch.aten.slice.Tensor %516, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%520 = torch.aten.transpose.int %517, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32>
%521 = torch.aten.view %520, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32>
%522 = torch.aten.permute %518, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32>
%523 = torch.aten.view %522, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32>
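// Annotation: fused QKV projection. A single 1024 -> 3072 linear is viewed as
// [1,128,16,192] and sliced along the last axis into Q/K/V with head size 64
// (%517/%518/%519); Q is then laid out as [16,128,64] and K as [16,64,128] for the bmm.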
%524 = torch.aten.bmm %521, %523 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32>
%525 = torch.aten.mul.Scalar %524, %float1.250000e-01 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32>
%526 = torch.aten.mul.Scalar %346, %float1.000000e00 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32>
%527 = torch.aten.add.Tensor %525, %526, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32>
%528 = torch.aten.view %527, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32>
%529 = torch.aten.mul.Scalar %528, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32>
%530 = torch.aten.add.Tensor %529, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32>
%531 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32>
%532 = torch.aten.maximum %530, %531 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32>
%values_2, %indices_3 = torch.aten.max.dim %532, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>
%533 = torch.aten.sub.Tensor %532, %values_2, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32>
%534 = torch.aten.exp %533 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32>
%535 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int>
%536 = torch.aten.sum.dim_IntList %534, %535, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32>
%537 = torch.aten.div.Tensor %534, %536 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32>
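// Annotation: %524..%537 compute ALiBi-biased scores and a numerically stable softmax.
// Scores are scaled by 0.125/n (0.125 = 1/sqrt(64)) and the alibi bias %346 by 1/n, then
// re-scaled by n after the [1,16,128,128] view; across the four attention blocks in this
// span n = 1, 2, 3, 4 (0.125/1.0/%int1, 0.0625/0.5/%int2, 0.041667/0.33333/%int3,
// 0.03125/0.25/%int4), which looks like BLOOM's layer-number scaling for softmax
// stability (an inference from the constants, not asserted by the dump). The additive
// causal mask %372 is applied, scores are floored via maximum with the scalar %287
// (likely a dtype-min constant), and softmax is computed as exp(x - max) / sum.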
%538 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%539 = torch.aten.empty.memory_format %538, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1>
%540 = torch.valsem.aten.copy %539, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1>
%541 = torch.aten.bitwise_not %540 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1>
%542 = torch.aten.mul.Tensor %537, %541 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32>
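// Annotation: %538..%542 emulate masked_fill on the probabilities: empty.memory_format
// with dtype %int11 (torch bool) allocates an i1 tensor, valsem.aten.copy casts the mask
// %372 into it, bitwise_not inverts it, and the multiply zeroes the masked entries.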
%543 = torch.aten.view %542, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32>
%544 = torch.aten.transpose.int %519, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32>
%545 = torch.aten.view %544, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32>
%546 = torch.aten.bmm %543, %545 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32>
%547 = torch.aten.view %546, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32>
%548 = torch.aten.permute %547, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32>
%549 = torch.aten.clone %548, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%550 = torch.aten.view %549, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
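// Annotation: probs @ V ([16,128,128] x [16,128,64] bmm), followed by the same head-merge
// reshape as above, yielding the [1,128,1024] context for this block's output projection.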
%551 = torch.aten.transpose.int %274, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32>
%552 = torch.aten.view %550, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%553 = torch.aten.mm %552, %551 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32>
%554 = torch.aten.mul.Scalar %273, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32>
%555 = torch.aten.add.Tensor %554, %553, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32>
%556 = torch.aten.view %555, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
%557 = torch.aten.add.Tensor %491, %556, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
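// Annotation: output projection and residual close this attention sub-block; %557 feeds
// the post-attention LayerNorm below.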
%558 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int>
%559 = torch.aten.sum.dim_IntList %557, %558, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%560 = torch.aten.div.Scalar %559, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%561 = torch.aten.size.int %557, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%562 = torch.prim.ListConstruct %561, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%563 = torch.aten.broadcast_to %560, %562 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%564 = torch.aten.sub.Tensor %557, %563, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%565 = torch.aten.mul.Tensor %564, %564 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%566 = torch.aten.sum.dim_IntList %565, %558, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%567 = torch.aten.div.Scalar %566, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%568 = torch.aten.add.Scalar %567, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32>
%569 = torch.aten.rsqrt %568 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32>
%570 = torch.aten.size.int %557, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%571 = torch.prim.ListConstruct %570, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%572 = torch.aten.broadcast_to %569, %571 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%573 = torch.aten.mul.Tensor %564, %572 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%574 = torch.aten.mul.Tensor %573, %272 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%575 = torch.aten.add.Tensor %574, %271, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%576 = torch.aten.transpose.int %270, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32>
%577 = torch.aten.view %575, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%578 = torch.aten.mm %577, %576 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32>
%579 = torch.aten.mul.Scalar %269, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32>
%580 = torch.aten.add.Tensor %579, %578, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32>
%581 = torch.aten.view %580, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32>
%582 = torch.aten.mul.Scalar %581, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%583 = torch.aten.mul.Scalar %581, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%584 = torch.aten.mul.Scalar %581, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%585 = torch.aten.mul.Tensor %584, %581 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%586 = torch.aten.add.Scalar %585, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32>
%587 = torch.aten.mul.Tensor %583, %586 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%588 = torch.aten.tanh %587 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%589 = torch.aten.add.Scalar %588, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32>
%590 = torch.aten.mul.Tensor %582, %589 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%591 = torch.aten.transpose.int %268, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32>
%592 = torch.aten.view %590, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32>
%593 = torch.aten.mm %592, %591 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32>
%594 = torch.aten.mul.Scalar %267, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32>
%595 = torch.aten.add.Tensor %594, %593, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32>
%596 = torch.aten.view %595, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
%597 = torch.aten.add.Tensor %557, %596, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
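// Annotation: a full decoder layer ends at %597 (LayerNorm -> attention -> residual ->
// LayerNorm -> GELU MLP -> residual). The blocks below repeat the same op sequence with
// different weights; the next one uses score scale 0.0625 with re-scale %int2.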
%598 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int>
%599 = torch.aten.sum.dim_IntList %597, %598, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%600 = torch.aten.div.Scalar %599, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%601 = torch.aten.size.int %597, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%602 = torch.prim.ListConstruct %601, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%603 = torch.aten.broadcast_to %600, %602 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%604 = torch.aten.sub.Tensor %597, %603, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%605 = torch.aten.mul.Tensor %604, %604 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%606 = torch.aten.sum.dim_IntList %605, %598, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%607 = torch.aten.div.Scalar %606, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%608 = torch.aten.add.Scalar %607, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32>
%609 = torch.aten.rsqrt %608 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32>
%610 = torch.aten.size.int %597, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%611 = torch.prim.ListConstruct %610, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%612 = torch.aten.broadcast_to %609, %611 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%613 = torch.aten.mul.Tensor %604, %612 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%614 = torch.aten.mul.Tensor %613, %266 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%615 = torch.aten.add.Tensor %614, %265, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%616 = torch.aten.transpose.int %264, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32>
%617 = torch.aten.view %615, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%618 = torch.aten.mm %617, %616 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32>
%619 = torch.aten.mul.Scalar %263, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32>
%620 = torch.aten.add.Tensor %619, %618, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32>
%621 = torch.aten.view %620, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32>
%622 = torch.aten.view %621, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32>
%623 = torch.aten.slice.Tensor %622, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%624 = torch.aten.slice.Tensor %622, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%625 = torch.aten.slice.Tensor %622, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%626 = torch.aten.transpose.int %623, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32>
%627 = torch.aten.view %626, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32>
%628 = torch.aten.permute %624, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32>
%629 = torch.aten.view %628, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32>
%630 = torch.aten.bmm %627, %629 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32>
%631 = torch.aten.mul.Scalar %630, %float6.250000e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32>
%632 = torch.aten.mul.Scalar %346, %float5.000000e-01 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32>
%633 = torch.aten.add.Tensor %631, %632, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32>
%634 = torch.aten.view %633, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32>
%635 = torch.aten.mul.Scalar %634, %int2 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32>
%636 = torch.aten.add.Tensor %635, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32>
%637 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32>
%638 = torch.aten.maximum %636, %637 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32>
%values_4, %indices_5 = torch.aten.max.dim %638, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>
%639 = torch.aten.sub.Tensor %638, %values_4, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32>
%640 = torch.aten.exp %639 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32>
%641 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int>
%642 = torch.aten.sum.dim_IntList %640, %641, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32>
%643 = torch.aten.div.Tensor %640, %642 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32>
%644 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%645 = torch.aten.empty.memory_format %644, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1>
%646 = torch.valsem.aten.copy %645, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1>
%647 = torch.aten.bitwise_not %646 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1>
%648 = torch.aten.mul.Tensor %643, %647 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32>
%649 = torch.aten.view %648, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32>
%650 = torch.aten.transpose.int %625, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32>
%651 = torch.aten.view %650, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32>
%652 = torch.aten.bmm %649, %651 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32>
%653 = torch.aten.view %652, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32>
%654 = torch.aten.permute %653, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32>
%655 = torch.aten.clone %654, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%656 = torch.aten.view %655, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
%657 = torch.aten.transpose.int %262, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32>
%658 = torch.aten.view %656, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%659 = torch.aten.mm %658, %657 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32>
%660 = torch.aten.mul.Scalar %261, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32>
%661 = torch.aten.add.Tensor %660, %659, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32>
%662 = torch.aten.view %661, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
%663 = torch.aten.add.Tensor %597, %662, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%664 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int>
%665 = torch.aten.sum.dim_IntList %663, %664, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%666 = torch.aten.div.Scalar %665, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%667 = torch.aten.size.int %663, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%668 = torch.prim.ListConstruct %667, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%669 = torch.aten.broadcast_to %666, %668 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%670 = torch.aten.sub.Tensor %663, %669, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%671 = torch.aten.mul.Tensor %670, %670 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%672 = torch.aten.sum.dim_IntList %671, %664, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%673 = torch.aten.div.Scalar %672, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%674 = torch.aten.add.Scalar %673, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32>
%675 = torch.aten.rsqrt %674 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32>
%676 = torch.aten.size.int %663, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%677 = torch.prim.ListConstruct %676, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%678 = torch.aten.broadcast_to %675, %677 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%679 = torch.aten.mul.Tensor %670, %678 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%680 = torch.aten.mul.Tensor %679, %260 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%681 = torch.aten.add.Tensor %680, %259, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%682 = torch.aten.transpose.int %258, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32>
%683 = torch.aten.view %681, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%684 = torch.aten.mm %683, %682 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32>
%685 = torch.aten.mul.Scalar %257, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32>
%686 = torch.aten.add.Tensor %685, %684, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32>
%687 = torch.aten.view %686, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32>
%688 = torch.aten.mul.Scalar %687, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%689 = torch.aten.mul.Scalar %687, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%690 = torch.aten.mul.Scalar %687, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%691 = torch.aten.mul.Tensor %690, %687 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%692 = torch.aten.add.Scalar %691, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32>
%693 = torch.aten.mul.Tensor %689, %692 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%694 = torch.aten.tanh %693 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%695 = torch.aten.add.Scalar %694, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32>
%696 = torch.aten.mul.Tensor %688, %695 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%697 = torch.aten.transpose.int %256, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32>
%698 = torch.aten.view %696, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32>
%699 = torch.aten.mm %698, %697 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32>
%700 = torch.aten.mul.Scalar %255, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32>
%701 = torch.aten.add.Tensor %700, %699, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32>
%702 = torch.aten.view %701, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
%703 = torch.aten.add.Tensor %663, %702, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
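// Annotation: layer boundary at %703; the next block uses score scale
// %float4.166670e-02 (= 0.125/3), alibi scale 1/3, re-scale %int3.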
%704 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int>
%705 = torch.aten.sum.dim_IntList %703, %704, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%706 = torch.aten.div.Scalar %705, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%707 = torch.aten.size.int %703, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%708 = torch.prim.ListConstruct %707, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%709 = torch.aten.broadcast_to %706, %708 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%710 = torch.aten.sub.Tensor %703, %709, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%711 = torch.aten.mul.Tensor %710, %710 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%712 = torch.aten.sum.dim_IntList %711, %704, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%713 = torch.aten.div.Scalar %712, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%714 = torch.aten.add.Scalar %713, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32>
%715 = torch.aten.rsqrt %714 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32>
%716 = torch.aten.size.int %703, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%717 = torch.prim.ListConstruct %716, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%718 = torch.aten.broadcast_to %715, %717 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%719 = torch.aten.mul.Tensor %710, %718 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%720 = torch.aten.mul.Tensor %719, %254 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%721 = torch.aten.add.Tensor %720, %253, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%722 = torch.aten.transpose.int %252, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32>
%723 = torch.aten.view %721, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%724 = torch.aten.mm %723, %722 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32>
%725 = torch.aten.mul.Scalar %251, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32>
%726 = torch.aten.add.Tensor %725, %724, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32>
%727 = torch.aten.view %726, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32>
%728 = torch.aten.view %727, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32>
%729 = torch.aten.slice.Tensor %728, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%730 = torch.aten.slice.Tensor %728, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%731 = torch.aten.slice.Tensor %728, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%732 = torch.aten.transpose.int %729, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32>
%733 = torch.aten.view %732, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32>
%734 = torch.aten.permute %730, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32>
%735 = torch.aten.view %734, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32>
%736 = torch.aten.bmm %733, %735 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32>
%737 = torch.aten.mul.Scalar %736, %float4.166670e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32>
%738 = torch.aten.mul.Scalar %346, %float3.333330e-01 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32>
%739 = torch.aten.add.Tensor %737, %738, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32>
%740 = torch.aten.view %739, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32>
%741 = torch.aten.mul.Scalar %740, %int3 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32>
%742 = torch.aten.add.Tensor %741, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32>
%743 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32>
%744 = torch.aten.maximum %742, %743 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32>
%values_6, %indices_7 = torch.aten.max.dim %744, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>
%745 = torch.aten.sub.Tensor %744, %values_6, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32>
%746 = torch.aten.exp %745 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32>
%747 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int>
%748 = torch.aten.sum.dim_IntList %746, %747, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32>
%749 = torch.aten.div.Tensor %746, %748 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32>
%750 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%751 = torch.aten.empty.memory_format %750, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1>
%752 = torch.valsem.aten.copy %751, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1>
%753 = torch.aten.bitwise_not %752 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1>
%754 = torch.aten.mul.Tensor %749, %753 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32>
%755 = torch.aten.view %754, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32>
%756 = torch.aten.transpose.int %731, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32>
%757 = torch.aten.view %756, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32>
%758 = torch.aten.bmm %755, %757 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32>
%759 = torch.aten.view %758, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32>
%760 = torch.aten.permute %759, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32>
%761 = torch.aten.clone %760, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32>
%762 = torch.aten.view %761, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
%763 = torch.aten.transpose.int %250, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32>
%764 = torch.aten.view %762, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%765 = torch.aten.mm %764, %763 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32>
%766 = torch.aten.mul.Scalar %249, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32>
%767 = torch.aten.add.Tensor %766, %765, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32>
%768 = torch.aten.view %767, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
%769 = torch.aten.add.Tensor %703, %768, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%770 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int>
%771 = torch.aten.sum.dim_IntList %769, %770, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%772 = torch.aten.div.Scalar %771, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%773 = torch.aten.size.int %769, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%774 = torch.prim.ListConstruct %773, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%775 = torch.aten.broadcast_to %772, %774 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%776 = torch.aten.sub.Tensor %769, %775, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%777 = torch.aten.mul.Tensor %776, %776 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%778 = torch.aten.sum.dim_IntList %777, %770, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32>
%779 = torch.aten.div.Scalar %778, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32>
%780 = torch.aten.add.Scalar %779, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32>
%781 = torch.aten.rsqrt %780 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32>
%782 = torch.aten.size.int %769, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int
%783 = torch.prim.ListConstruct %782, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%784 = torch.aten.broadcast_to %781, %783 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32>
%785 = torch.aten.mul.Tensor %776, %784 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%786 = torch.aten.mul.Tensor %785, %248 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32>
%787 = torch.aten.add.Tensor %786, %247, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
%788 = torch.aten.transpose.int %246, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32>
%789 = torch.aten.view %787, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32>
%790 = torch.aten.mm %789, %788 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32>
%791 = torch.aten.mul.Scalar %245, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32>
%792 = torch.aten.add.Tensor %791, %790, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32>
%793 = torch.aten.view %792, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32>
%794 = torch.aten.mul.Scalar %793, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%795 = torch.aten.mul.Scalar %793, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%796 = torch.aten.mul.Scalar %793, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32>
%797 = torch.aten.mul.Tensor %796, %793 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%798 = torch.aten.add.Scalar %797, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32>
%799 = torch.aten.mul.Tensor %795, %798 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%800 = torch.aten.tanh %799 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%801 = torch.aten.add.Scalar %800, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32>
%802 = torch.aten.mul.Tensor %794, %801 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32>
%803 = torch.aten.transpose.int %244, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32>
%804 = torch.aten.view %802, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32>
%805 = torch.aten.mm %804, %803 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32>
%806 = torch.aten.mul.Scalar %243, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32>
%807 = torch.aten.add.Tensor %806, %805, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32>
%808 = torch.aten.view %807, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32>
%809 = torch.aten.add.Tensor %769, %808, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32>
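// Annotation: layer boundary at %809; the next block uses score scale
// %float3.125000e-02 (= 0.125/4), alibi scale 1/4, re-scale %int4.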
| %810 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %811 = torch.aten.sum.dim_IntList %809, %810, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %812 = torch.aten.div.Scalar %811, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %813 = torch.aten.size.int %809, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %814 = torch.prim.ListConstruct %813, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %815 = torch.aten.broadcast_to %812, %814 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %816 = torch.aten.sub.Tensor %809, %815, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %817 = torch.aten.mul.Tensor %816, %816 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %818 = torch.aten.sum.dim_IntList %817, %810, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %819 = torch.aten.div.Scalar %818, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %820 = torch.aten.add.Scalar %819, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %821 = torch.aten.rsqrt %820 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %822 = torch.aten.size.int %809, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %823 = torch.prim.ListConstruct %822, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %824 = torch.aten.broadcast_to %821, %823 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %825 = torch.aten.mul.Tensor %816, %824 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %826 = torch.aten.mul.Tensor %825, %242 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %827 = torch.aten.add.Tensor %826, %241, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %828 = torch.aten.transpose.int %240, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %829 = torch.aten.view %827, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %830 = torch.aten.mm %829, %828 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %831 = torch.aten.mul.Scalar %239, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %832 = torch.aten.add.Tensor %831, %830, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %833 = torch.aten.view %832, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %834 = torch.aten.view %833, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %835 = torch.aten.slice.Tensor %834, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %836 = torch.aten.slice.Tensor %834, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %837 = torch.aten.slice.Tensor %834, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %838 = torch.aten.transpose.int %835, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %839 = torch.aten.view %838, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %840 = torch.aten.permute %836, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %841 = torch.aten.view %840, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %842 = torch.aten.bmm %839, %841 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %843 = torch.aten.mul.Scalar %842, %float3.125000e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %844 = torch.aten.mul.Scalar %346, %float2.500000e-01 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %845 = torch.aten.add.Tensor %843, %844, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %846 = torch.aten.view %845, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %847 = torch.aten.mul.Scalar %846, %int4 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %848 = torch.aten.add.Tensor %847, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %849 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %850 = torch.aten.maximum %848, %849 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_8, %indices_9 = torch.aten.max.dim %850, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %851 = torch.aten.sub.Tensor %850, %values_8, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %852 = torch.aten.exp %851 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %853 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %854 = torch.aten.sum.dim_IntList %852, %853, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %855 = torch.aten.div.Tensor %852, %854 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %856 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %857 = torch.aten.empty.memory_format %856, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %858 = torch.valsem.aten.copy %857, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %859 = torch.aten.bitwise_not %858 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %860 = torch.aten.mul.Tensor %855, %859 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %861 = torch.aten.view %860, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %862 = torch.aten.transpose.int %837, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %863 = torch.aten.view %862, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %864 = torch.aten.bmm %861, %863 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %865 = torch.aten.view %864, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %866 = torch.aten.permute %865, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %867 = torch.aten.clone %866, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %868 = torch.aten.view %867, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
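Note: %861 through %868 compute the attention context and merge the heads: probs @ V as a batched matmul, a permute back to [1,128,16,64] (the list %438 is defined above this excerpt, but the shapes force permutation (0,2,1,3)), a clone to make the layout contiguous, and a reshape to [1,128,1024]. Roughly, in PyTorch:

    import torch

    def attn_context(probs, v):
        # probs: [1,16,128,128], v: [1,128,16,64]
        ctx = torch.bmm(probs.view(16, 128, 128),
                        v.transpose(1, 2).reshape(16, 128, 64))  # [16,128,64]
        ctx = ctx.view(1, 16, 128, 64).permute(0, 2, 1, 3)       # [1,128,16,64]
        return ctx.contiguous().view(1, 128, 16 * 64)            # merge heads -> 1024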
| %869 = torch.aten.transpose.int %238, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %870 = torch.aten.view %868, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %871 = torch.aten.mm %870, %869 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %872 = torch.aten.mul.Scalar %237, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %873 = torch.aten.add.Tensor %872, %871, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %874 = torch.aten.view %873, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
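Note: %869 through %874 are the dense output projection, an addmm-style decomposition of y = x @ W^T + b with weight %238 and bias %237; the mul.Scalar of the bias by %int1 is addmm's beta factor, emitted even though it is a no-op here. Equivalent PyTorch sketch (names mine):

    import torch

    def linear_3d(x, weight, bias):
        # x: [1,128,1024]; weight: [out_features, in_features]
        y = torch.mm(x.view(128, -1), weight.t())  # transpose.int + view + mm
        y = bias * 1 + y                           # mul.Scalar %int1 + add.Tensor (beta == 1)
        return y.view(1, 128, -1)                  # view back to [1,128,out_features]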
| %875 = torch.aten.add.Tensor %809, %874, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %876 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %877 = torch.aten.sum.dim_IntList %875, %876, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %878 = torch.aten.div.Scalar %877, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %879 = torch.aten.size.int %875, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %880 = torch.prim.ListConstruct %879, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %881 = torch.aten.broadcast_to %878, %880 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %882 = torch.aten.sub.Tensor %875, %881, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %883 = torch.aten.mul.Tensor %882, %882 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %884 = torch.aten.sum.dim_IntList %883, %876, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %885 = torch.aten.div.Scalar %884, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %886 = torch.aten.add.Scalar %885, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %887 = torch.aten.rsqrt %886 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %888 = torch.aten.size.int %875, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %889 = torch.prim.ListConstruct %888, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %890 = torch.aten.broadcast_to %887, %889 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %891 = torch.aten.mul.Tensor %882, %890 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %892 = torch.aten.mul.Tensor %891, %236 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %893 = torch.aten.add.Tensor %892, %235, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
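Note: %875 adds the attention output back onto the residual stream, and %876 through %893 expand a LayerNorm over the last (1024-wide) dimension into explicit arithmetic: mean as sum/1024, biased variance the same way, rsqrt(var + 1e-05), then the affine scale %236 and shift %235. A compact PyTorch rendering of that chain:

    import torch

    def layer_norm_1024(x, weight, bias, eps=1e-05):
        # x: [?,128,1024]
        mean = x.sum(dim=2, keepdim=True) / 1024                      # sum.dim_IntList + div.Scalar
        centered = x - mean                                           # broadcast_to + sub.Tensor
        var = (centered * centered).sum(dim=2, keepdim=True) / 1024   # biased variance
        return centered * torch.rsqrt(var + eps) * weight + bias      # add.Scalar + rsqrt + affine

The paired aten.size.int / prim.ListConstruct / broadcast_to ops only materialize the [?,128,1024] broadcast shapes that PyTorch broadcasting handles implicitly.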
| %894 = torch.aten.transpose.int %234, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %895 = torch.aten.view %893, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %896 = torch.aten.mm %895, %894 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %897 = torch.aten.mul.Scalar %233, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %898 = torch.aten.add.Tensor %897, %896, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %899 = torch.aten.view %898, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %900 = torch.aten.mul.Scalar %899, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %901 = torch.aten.mul.Scalar %899, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %902 = torch.aten.mul.Scalar %899, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %903 = torch.aten.mul.Tensor %902, %899 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %904 = torch.aten.add.Scalar %903, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %905 = torch.aten.mul.Tensor %901, %904 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %906 = torch.aten.tanh %905 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %907 = torch.aten.add.Scalar %906, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %908 = torch.aten.mul.Tensor %900, %907 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
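Note: %899 through %908 are the tanh approximation of GELU, 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))), with sqrt(2/pi) appearing as 7.978850e-01. Factored exactly as the ops do it:

    import torch

    def gelu_tanh(x):
        inner = (x * 0.7978850) * (1 + (x * 0.044715) * x)  # %901 * %904
        return (x * 0.5) * (1 + torch.tanh(inner))          # %900 * %907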
| %909 = torch.aten.transpose.int %232, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %910 = torch.aten.view %908, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %911 = torch.aten.mm %910, %909 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %912 = torch.aten.mul.Scalar %231, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %913 = torch.aten.add.Tensor %912, %911, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %914 = torch.aten.view %913, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %915 = torch.aten.add.Tensor %875, %914, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %916 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %917 = torch.aten.sum.dim_IntList %915, %916, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %918 = torch.aten.div.Scalar %917, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %919 = torch.aten.size.int %915, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %920 = torch.prim.ListConstruct %919, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %921 = torch.aten.broadcast_to %918, %920 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %922 = torch.aten.sub.Tensor %915, %921, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %923 = torch.aten.mul.Tensor %922, %922 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %924 = torch.aten.sum.dim_IntList %923, %916, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %925 = torch.aten.div.Scalar %924, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %926 = torch.aten.add.Scalar %925, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %927 = torch.aten.rsqrt %926 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %928 = torch.aten.size.int %915, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %929 = torch.prim.ListConstruct %928, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %930 = torch.aten.broadcast_to %927, %929 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %931 = torch.aten.mul.Tensor %922, %930 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %932 = torch.aten.mul.Tensor %931, %230 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %933 = torch.aten.add.Tensor %932, %229, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
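Note: from %934 onward the dump repeats the same decoder-layer pattern (fused QKV projection, slice into Q/K/V, scaled bmm with ALiBi, stable softmax, multiplicative mask, context and head merge, output projection, residual, LayerNorm, 4096-wide GELU MLP, residual, LayerNorm) once per remaining layer; only the layer-numbered constants change: %int5 with 2.500000e-02 and 2.000000e-01, %int6 with 2.083330e-02 and 1.666670e-01, %int7 with 1.785710e-02 and 1.428570e-01, %int8 with 1.562500e-02 and 1.250000e-01, each pair being (1/8)/n and 1/n for layer number n.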
| %934 = torch.aten.transpose.int %228, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %935 = torch.aten.view %933, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %936 = torch.aten.mm %935, %934 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %937 = torch.aten.mul.Scalar %227, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %938 = torch.aten.add.Tensor %937, %936, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %939 = torch.aten.view %938, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %940 = torch.aten.view %939, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %941 = torch.aten.slice.Tensor %940, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %942 = torch.aten.slice.Tensor %940, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %943 = torch.aten.slice.Tensor %940, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %944 = torch.aten.transpose.int %941, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %945 = torch.aten.view %944, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %946 = torch.aten.permute %942, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %947 = torch.aten.view %946, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %948 = torch.aten.bmm %945, %947 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %949 = torch.aten.mul.Scalar %948, %float2.500000e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %950 = torch.aten.mul.Scalar %346, %float2.000000e-01 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %951 = torch.aten.add.Tensor %949, %950, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %952 = torch.aten.view %951, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %953 = torch.aten.mul.Scalar %952, %int5 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %954 = torch.aten.add.Tensor %953, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %955 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %956 = torch.aten.maximum %954, %955 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_10, %indices_11 = torch.aten.max.dim %956, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %957 = torch.aten.sub.Tensor %956, %values_10, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %958 = torch.aten.exp %957 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %959 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %960 = torch.aten.sum.dim_IntList %958, %959, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %961 = torch.aten.div.Tensor %958, %960 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %962 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %963 = torch.aten.empty.memory_format %962, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %964 = torch.valsem.aten.copy %963, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %965 = torch.aten.bitwise_not %964 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %966 = torch.aten.mul.Tensor %961, %965 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %967 = torch.aten.view %966, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %968 = torch.aten.transpose.int %943, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %969 = torch.aten.view %968, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %970 = torch.aten.bmm %967, %969 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %971 = torch.aten.view %970, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %972 = torch.aten.permute %971, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %973 = torch.aten.clone %972, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %974 = torch.aten.view %973, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %975 = torch.aten.transpose.int %226, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %976 = torch.aten.view %974, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %977 = torch.aten.mm %976, %975 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %978 = torch.aten.mul.Scalar %225, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %979 = torch.aten.add.Tensor %978, %977, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %980 = torch.aten.view %979, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %981 = torch.aten.add.Tensor %915, %980, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %982 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %983 = torch.aten.sum.dim_IntList %981, %982, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %984 = torch.aten.div.Scalar %983, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %985 = torch.aten.size.int %981, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %986 = torch.prim.ListConstruct %985, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %987 = torch.aten.broadcast_to %984, %986 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %988 = torch.aten.sub.Tensor %981, %987, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %989 = torch.aten.mul.Tensor %988, %988 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %990 = torch.aten.sum.dim_IntList %989, %982, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %991 = torch.aten.div.Scalar %990, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %992 = torch.aten.add.Scalar %991, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %993 = torch.aten.rsqrt %992 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %994 = torch.aten.size.int %981, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %995 = torch.prim.ListConstruct %994, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %996 = torch.aten.broadcast_to %993, %995 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %997 = torch.aten.mul.Tensor %988, %996 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %998 = torch.aten.mul.Tensor %997, %224 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %999 = torch.aten.add.Tensor %998, %223, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1000 = torch.aten.transpose.int %222, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1001 = torch.aten.view %999, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1002 = torch.aten.mm %1001, %1000 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1003 = torch.aten.mul.Scalar %221, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1004 = torch.aten.add.Tensor %1003, %1002, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1005 = torch.aten.view %1004, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1006 = torch.aten.mul.Scalar %1005, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1007 = torch.aten.mul.Scalar %1005, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1008 = torch.aten.mul.Scalar %1005, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1009 = torch.aten.mul.Tensor %1008, %1005 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1010 = torch.aten.add.Scalar %1009, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1011 = torch.aten.mul.Tensor %1007, %1010 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1012 = torch.aten.tanh %1011 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1013 = torch.aten.add.Scalar %1012, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1014 = torch.aten.mul.Tensor %1006, %1013 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1015 = torch.aten.transpose.int %220, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1016 = torch.aten.view %1014, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1017 = torch.aten.mm %1016, %1015 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1018 = torch.aten.mul.Scalar %219, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1019 = torch.aten.add.Tensor %1018, %1017, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1020 = torch.aten.view %1019, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1021 = torch.aten.add.Tensor %981, %1020, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1022 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1023 = torch.aten.sum.dim_IntList %1021, %1022, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1024 = torch.aten.div.Scalar %1023, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1025 = torch.aten.size.int %1021, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1026 = torch.prim.ListConstruct %1025, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1027 = torch.aten.broadcast_to %1024, %1026 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1028 = torch.aten.sub.Tensor %1021, %1027, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1029 = torch.aten.mul.Tensor %1028, %1028 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1030 = torch.aten.sum.dim_IntList %1029, %1022, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1031 = torch.aten.div.Scalar %1030, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1032 = torch.aten.add.Scalar %1031, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1033 = torch.aten.rsqrt %1032 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1034 = torch.aten.size.int %1021, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1035 = torch.prim.ListConstruct %1034, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1036 = torch.aten.broadcast_to %1033, %1035 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1037 = torch.aten.mul.Tensor %1028, %1036 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1038 = torch.aten.mul.Tensor %1037, %218 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1039 = torch.aten.add.Tensor %1038, %217, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1040 = torch.aten.transpose.int %216, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1041 = torch.aten.view %1039, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1042 = torch.aten.mm %1041, %1040 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1043 = torch.aten.mul.Scalar %215, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1044 = torch.aten.add.Tensor %1043, %1042, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1045 = torch.aten.view %1044, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %1046 = torch.aten.view %1045, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %1047 = torch.aten.slice.Tensor %1046, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1048 = torch.aten.slice.Tensor %1046, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1049 = torch.aten.slice.Tensor %1046, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1050 = torch.aten.transpose.int %1047, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1051 = torch.aten.view %1050, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1052 = torch.aten.permute %1048, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %1053 = torch.aten.view %1052, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %1054 = torch.aten.bmm %1051, %1053 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %1055 = torch.aten.mul.Scalar %1054, %float2.083330e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %1056 = torch.aten.mul.Scalar %346, %float1.666670e-01 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %1057 = torch.aten.add.Tensor %1055, %1056, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %1058 = torch.aten.view %1057, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1059 = torch.aten.mul.Scalar %1058, %int6 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1060 = torch.aten.add.Tensor %1059, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1061 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %1062 = torch.aten.maximum %1060, %1061 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_12, %indices_13 = torch.aten.max.dim %1062, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %1063 = torch.aten.sub.Tensor %1062, %values_12, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %1064 = torch.aten.exp %1063 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1065 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %1066 = torch.aten.sum.dim_IntList %1064, %1065, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %1067 = torch.aten.div.Tensor %1064, %1066 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1068 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1069 = torch.aten.empty.memory_format %1068, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %1070 = torch.valsem.aten.copy %1069, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %1071 = torch.aten.bitwise_not %1070 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %1072 = torch.aten.mul.Tensor %1067, %1071 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1073 = torch.aten.view %1072, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %1074 = torch.aten.transpose.int %1049, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1075 = torch.aten.view %1074, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1076 = torch.aten.bmm %1073, %1075 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %1077 = torch.aten.view %1076, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %1078 = torch.aten.permute %1077, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %1079 = torch.aten.clone %1078, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1080 = torch.aten.view %1079, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1081 = torch.aten.transpose.int %214, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %1082 = torch.aten.view %1080, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1083 = torch.aten.mm %1082, %1081 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1084 = torch.aten.mul.Scalar %213, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1085 = torch.aten.add.Tensor %1084, %1083, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1086 = torch.aten.view %1085, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1087 = torch.aten.add.Tensor %1021, %1086, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1088 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1089 = torch.aten.sum.dim_IntList %1087, %1088, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1090 = torch.aten.div.Scalar %1089, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1091 = torch.aten.size.int %1087, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1092 = torch.prim.ListConstruct %1091, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1093 = torch.aten.broadcast_to %1090, %1092 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1094 = torch.aten.sub.Tensor %1087, %1093, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1095 = torch.aten.mul.Tensor %1094, %1094 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1096 = torch.aten.sum.dim_IntList %1095, %1088, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1097 = torch.aten.div.Scalar %1096, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1098 = torch.aten.add.Scalar %1097, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1099 = torch.aten.rsqrt %1098 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1100 = torch.aten.size.int %1087, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1101 = torch.prim.ListConstruct %1100, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1102 = torch.aten.broadcast_to %1099, %1101 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1103 = torch.aten.mul.Tensor %1094, %1102 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1104 = torch.aten.mul.Tensor %1103, %212 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1105 = torch.aten.add.Tensor %1104, %211, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1106 = torch.aten.transpose.int %210, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1107 = torch.aten.view %1105, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1108 = torch.aten.mm %1107, %1106 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1109 = torch.aten.mul.Scalar %209, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1110 = torch.aten.add.Tensor %1109, %1108, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1111 = torch.aten.view %1110, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1112 = torch.aten.mul.Scalar %1111, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1113 = torch.aten.mul.Scalar %1111, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1114 = torch.aten.mul.Scalar %1111, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1115 = torch.aten.mul.Tensor %1114, %1111 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1116 = torch.aten.add.Scalar %1115, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1117 = torch.aten.mul.Tensor %1113, %1116 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1118 = torch.aten.tanh %1117 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1119 = torch.aten.add.Scalar %1118, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1120 = torch.aten.mul.Tensor %1112, %1119 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1121 = torch.aten.transpose.int %208, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1122 = torch.aten.view %1120, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1123 = torch.aten.mm %1122, %1121 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1124 = torch.aten.mul.Scalar %207, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1125 = torch.aten.add.Tensor %1124, %1123, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1126 = torch.aten.view %1125, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1127 = torch.aten.add.Tensor %1087, %1126, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1128 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1129 = torch.aten.sum.dim_IntList %1127, %1128, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1130 = torch.aten.div.Scalar %1129, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1131 = torch.aten.size.int %1127, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1132 = torch.prim.ListConstruct %1131, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1133 = torch.aten.broadcast_to %1130, %1132 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1134 = torch.aten.sub.Tensor %1127, %1133, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1135 = torch.aten.mul.Tensor %1134, %1134 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1136 = torch.aten.sum.dim_IntList %1135, %1128, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1137 = torch.aten.div.Scalar %1136, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1138 = torch.aten.add.Scalar %1137, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1139 = torch.aten.rsqrt %1138 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1140 = torch.aten.size.int %1127, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1141 = torch.prim.ListConstruct %1140, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1142 = torch.aten.broadcast_to %1139, %1141 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1143 = torch.aten.mul.Tensor %1134, %1142 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1144 = torch.aten.mul.Tensor %1143, %206 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1145 = torch.aten.add.Tensor %1144, %205, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1146 = torch.aten.transpose.int %204, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1147 = torch.aten.view %1145, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1148 = torch.aten.mm %1147, %1146 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1149 = torch.aten.mul.Scalar %203, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1150 = torch.aten.add.Tensor %1149, %1148, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1151 = torch.aten.view %1150, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %1152 = torch.aten.view %1151, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %1153 = torch.aten.slice.Tensor %1152, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1154 = torch.aten.slice.Tensor %1152, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1155 = torch.aten.slice.Tensor %1152, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1156 = torch.aten.transpose.int %1153, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1157 = torch.aten.view %1156, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1158 = torch.aten.permute %1154, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %1159 = torch.aten.view %1158, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %1160 = torch.aten.bmm %1157, %1159 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %1161 = torch.aten.mul.Scalar %1160, %float1.785710e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %1162 = torch.aten.mul.Scalar %346, %float1.428570e-01 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %1163 = torch.aten.add.Tensor %1161, %1162, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %1164 = torch.aten.view %1163, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1165 = torch.aten.mul.Scalar %1164, %int7 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1166 = torch.aten.add.Tensor %1165, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1167 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %1168 = torch.aten.maximum %1166, %1167 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_14, %indices_15 = torch.aten.max.dim %1168, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %1169 = torch.aten.sub.Tensor %1168, %values_14, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %1170 = torch.aten.exp %1169 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1171 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %1172 = torch.aten.sum.dim_IntList %1170, %1171, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %1173 = torch.aten.div.Tensor %1170, %1172 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1174 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1175 = torch.aten.empty.memory_format %1174, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %1176 = torch.valsem.aten.copy %1175, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %1177 = torch.aten.bitwise_not %1176 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %1178 = torch.aten.mul.Tensor %1173, %1177 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1179 = torch.aten.view %1178, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %1180 = torch.aten.transpose.int %1155, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1181 = torch.aten.view %1180, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1182 = torch.aten.bmm %1179, %1181 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %1183 = torch.aten.view %1182, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %1184 = torch.aten.permute %1183, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %1185 = torch.aten.clone %1184, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1186 = torch.aten.view %1185, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1187 = torch.aten.transpose.int %202, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %1188 = torch.aten.view %1186, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1189 = torch.aten.mm %1188, %1187 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1190 = torch.aten.mul.Scalar %201, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1191 = torch.aten.add.Tensor %1190, %1189, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1192 = torch.aten.view %1191, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1193 = torch.aten.add.Tensor %1127, %1192, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1194 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1195 = torch.aten.sum.dim_IntList %1193, %1194, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1196 = torch.aten.div.Scalar %1195, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1197 = torch.aten.size.int %1193, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1198 = torch.prim.ListConstruct %1197, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1199 = torch.aten.broadcast_to %1196, %1198 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1200 = torch.aten.sub.Tensor %1193, %1199, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1201 = torch.aten.mul.Tensor %1200, %1200 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1202 = torch.aten.sum.dim_IntList %1201, %1194, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1203 = torch.aten.div.Scalar %1202, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1204 = torch.aten.add.Scalar %1203, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1205 = torch.aten.rsqrt %1204 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1206 = torch.aten.size.int %1193, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1207 = torch.prim.ListConstruct %1206, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1208 = torch.aten.broadcast_to %1205, %1207 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1209 = torch.aten.mul.Tensor %1200, %1208 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1210 = torch.aten.mul.Tensor %1209, %200 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1211 = torch.aten.add.Tensor %1210, %199, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1212 = torch.aten.transpose.int %198, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1213 = torch.aten.view %1211, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1214 = torch.aten.mm %1213, %1212 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1215 = torch.aten.mul.Scalar %197, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1216 = torch.aten.add.Tensor %1215, %1214, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1217 = torch.aten.view %1216, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1218 = torch.aten.mul.Scalar %1217, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1219 = torch.aten.mul.Scalar %1217, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1220 = torch.aten.mul.Scalar %1217, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1221 = torch.aten.mul.Tensor %1220, %1217 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1222 = torch.aten.add.Scalar %1221, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1223 = torch.aten.mul.Tensor %1219, %1222 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1224 = torch.aten.tanh %1223 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1225 = torch.aten.add.Scalar %1224, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1226 = torch.aten.mul.Tensor %1218, %1225 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1227 = torch.aten.transpose.int %196, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1228 = torch.aten.view %1226, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1229 = torch.aten.mm %1228, %1227 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1230 = torch.aten.mul.Scalar %195, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1231 = torch.aten.add.Tensor %1230, %1229, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1232 = torch.aten.view %1231, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1233 = torch.aten.add.Tensor %1193, %1232, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1234 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1235 = torch.aten.sum.dim_IntList %1233, %1234, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1236 = torch.aten.div.Scalar %1235, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1237 = torch.aten.size.int %1233, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1238 = torch.prim.ListConstruct %1237, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1239 = torch.aten.broadcast_to %1236, %1238 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1240 = torch.aten.sub.Tensor %1233, %1239, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1241 = torch.aten.mul.Tensor %1240, %1240 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1242 = torch.aten.sum.dim_IntList %1241, %1234, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1243 = torch.aten.div.Scalar %1242, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1244 = torch.aten.add.Scalar %1243, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1245 = torch.aten.rsqrt %1244 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1246 = torch.aten.size.int %1233, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1247 = torch.prim.ListConstruct %1246, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1248 = torch.aten.broadcast_to %1245, %1247 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1249 = torch.aten.mul.Tensor %1240, %1248 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1250 = torch.aten.mul.Tensor %1249, %194 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1251 = torch.aten.add.Tensor %1250, %193, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1252 = torch.aten.transpose.int %192, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1253 = torch.aten.view %1251, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1254 = torch.aten.mm %1253, %1252 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1255 = torch.aten.mul.Scalar %191, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1256 = torch.aten.add.Tensor %1255, %1254, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1257 = torch.aten.view %1256, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %1258 = torch.aten.view %1257, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %1259 = torch.aten.slice.Tensor %1258, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1260 = torch.aten.slice.Tensor %1258, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1261 = torch.aten.slice.Tensor %1258, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
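[annotation] %1252 through %1261 are a fused QKV projection: one [1024,3072] matmul, a reshape to [1,128,16,192] (16 heads x 3 x 64), then three width-64 slices along the last dimension for query/key/value. A sketch under those shape assumptions (names hypothetical, random data standing in for the real operands):

    import torch

    hidden = torch.randn(1, 128, 1024)                   # [batch, seq, hidden]
    w_qkv = torch.randn(3072, 1024)                      # fused weight, rows = 16 heads * 3 * 64
    fused = (hidden.view(128, 1024) @ w_qkv.t()).view(1, 128, 16, 192)
    q = fused[..., 0:64]                                 # aten.slice.Tensor, dim -1
    k = fused[..., 64:128]
    v = fused[..., 128:192]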
| %1262 = torch.aten.transpose.int %1259, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1263 = torch.aten.view %1262, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1264 = torch.aten.permute %1260, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %1265 = torch.aten.view %1264, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %1266 = torch.aten.bmm %1263, %1265 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %1267 = torch.aten.mul.Scalar %1266, %float1.562500e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %1268 = torch.aten.mul.Scalar %346, %float1.250000e-01 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %1269 = torch.aten.add.Tensor %1267, %1268, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %1270 = torch.aten.view %1269, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1271 = torch.aten.mul.Scalar %1270, %int8 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1272 = torch.aten.add.Tensor %1271, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
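[annotation] The scaling constants here are layer-dependent: 1.562500e-02 = 1/(8*8) on the raw scores, 1.250000e-01 = 1/8 on the [16,1,128] bias %346, followed by mul.Scalar ... %int8, so the net effect is scores/sqrt(64) + bias + mask while keeping the pre-mask intermediates small. The repeats below use 1/(8*9), 1/9, x9, then 1/(8*10), 1/10, x10, and so on. Reading %346 as an ALiBi-style positional bias is an inference from its shape and BLOOM's architecture, not something the log states. An arithmetic check of the identity, under those assumptions:

    import math, torch

    mult, head_dim = 8, 64                               # layer multiplier and per-head width (inferred)
    scores = torch.randn(16, 128, 128)                   # bmm(q, k^T) analogue
    bias = torch.randn(16, 1, 128)                       # %346 analogue
    mask = torch.zeros(1, 1, 128, 128)                   # %372 analogue

    pre = scores * (1.0 / (mult * math.sqrt(head_dim))) + bias * (1.0 / mult)
    out = pre.view(1, 16, 128, 128) * mult + mask        # net: scores/sqrt(64) + bias + mask
    ref = (scores / math.sqrt(head_dim) + bias).view(1, 16, 128, 128) + mask
    assert torch.allclose(out, ref, atol=1e-5)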
| %1273 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %1274 = torch.aten.maximum %1272, %1273 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_16, %indices_17 = torch.aten.max.dim %1274, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %1275 = torch.aten.sub.Tensor %1274, %values_16, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %1276 = torch.aten.exp %1275 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1277 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %1278 = torch.aten.sum.dim_IntList %1276, %1277, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %1279 = torch.aten.div.Tensor %1276, %1278 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
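[annotation] %1273 through %1279 are a numerically stable softmax: clamp against the scalar %287 via aten.maximum (presumably a mask fill value), subtract the per-row max (aten.max.dim with keepdim), exponentiate, and normalize by the row sum. An equivalent sketch:

    import torch

    def stable_softmax(x, dim=-1):
        m, _ = x.max(dim=dim, keepdim=True)      # aten.max.dim, keepdim = true
        e = torch.exp(x - m)                     # aten.sub.Tensor + aten.exp
        return e / e.sum(dim=dim, keepdim=True)  # aten.sum.dim_IntList + aten.div.Tensor

    x = torch.randn(1, 16, 128, 128)
    assert torch.allclose(stable_softmax(x), torch.softmax(x, dim=-1), atol=1e-6)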
| %1280 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1281 = torch.aten.empty.memory_format %1280, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %1282 = torch.valsem.aten.copy %1281, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %1283 = torch.aten.bitwise_not %1282 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %1284 = torch.aten.mul.Tensor %1279, %1283 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
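[annotation] %1280 through %1284 rebuild the f32 mask %372 as a boolean tensor (dtype code 11 in aten.empty.memory_format is torch.bool), invert it with bitwise_not, and multiply the attention probabilities by it; the i1 operand promotes to 0.0/1.0, zeroing fully masked positions. Sketch with hypothetical stand-in tensors:

    import torch

    probs = torch.rand(1, 16, 128, 128)                     # softmax output
    mask_f32 = torch.zeros(1, 1, 128, 128)                  # %372 analogue; nonzero where masked
    mask_bool = torch.empty(1, 1, 128, 128, dtype=torch.bool).copy_(mask_f32)  # empty + copy, as above
    masked = probs * ~mask_bool                             # aten.bitwise_not + aten.mul.Tensor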
| %1285 = torch.aten.view %1284, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %1286 = torch.aten.transpose.int %1261, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1287 = torch.aten.view %1286, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1288 = torch.aten.bmm %1285, %1287 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %1289 = torch.aten.view %1288, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %1290 = torch.aten.permute %1289, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %1291 = torch.aten.clone %1290, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1292 = torch.aten.view %1291, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
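[annotation] %1285 through %1292 apply the probabilities to the values and merge heads: bmm of [16,128,128] with [16,128,64], reshape to [1,16,128,64], permute back to [1,128,16,64], a clone to make the permuted layout contiguous, and a final view to [1,128,1024]. Sketch:

    import torch

    probs = torch.rand(16, 128, 128)
    v = torch.randn(16, 128, 64)
    ctx = torch.bmm(probs, v)                                        # [16, 128, 64]
    ctx = ctx.view(1, 16, 128, 64).permute(0, 2, 1, 3).contiguous()  # the aten.clone is a contiguous copy
    ctx = ctx.view(1, 128, 1024)                                     # merge 16 heads * 64 dims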
| %1293 = torch.aten.transpose.int %190, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %1294 = torch.aten.view %1292, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1295 = torch.aten.mm %1294, %1293 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1296 = torch.aten.mul.Scalar %189, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1297 = torch.aten.add.Tensor %1296, %1295, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1298 = torch.aten.view %1297, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1299 = torch.aten.add.Tensor %1233, %1298, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
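[annotation] Every linear layer in this dump appears as a decomposed aten.addmm: transpose the stored [out,in] weight, mm, then beta*bias + result with beta = alpha = 1 (the mul.Scalar %189, %int1 above is the beta multiply). The [1,128,1024] output is then added onto the dynamic-batch residual stream (%1233 here). A sketch of the addmm decomposition, with hypothetical operands:

    import torch

    x = torch.randn(128, 1024)                  # flattened [1*128, 1024] activations
    w = torch.randn(1024, 1024)                 # stored [out, in]; transposed before the mm
    bias = torch.randn(1024)
    out = bias * 1 + x @ w.t()                  # mul.Scalar(bias, 1) + aten.mm + add.Tensor(alpha=1)
    assert torch.allclose(out, torch.addmm(bias, x, w.t()), atol=1e-4)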
| %1300 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1301 = torch.aten.sum.dim_IntList %1299, %1300, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1302 = torch.aten.div.Scalar %1301, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1303 = torch.aten.size.int %1299, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1304 = torch.prim.ListConstruct %1303, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1305 = torch.aten.broadcast_to %1302, %1304 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1306 = torch.aten.sub.Tensor %1299, %1305, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1307 = torch.aten.mul.Tensor %1306, %1306 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1308 = torch.aten.sum.dim_IntList %1307, %1300, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1309 = torch.aten.div.Scalar %1308, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1310 = torch.aten.add.Scalar %1309, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1311 = torch.aten.rsqrt %1310 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1312 = torch.aten.size.int %1299, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1313 = torch.prim.ListConstruct %1312, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1314 = torch.aten.broadcast_to %1311, %1313 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1315 = torch.aten.mul.Tensor %1306, %1314 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1316 = torch.aten.mul.Tensor %1315, %188 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1317 = torch.aten.add.Tensor %1316, %187, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1318 = torch.aten.transpose.int %186, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1319 = torch.aten.view %1317, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1320 = torch.aten.mm %1319, %1318 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1321 = torch.aten.mul.Scalar %185, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1322 = torch.aten.add.Tensor %1321, %1320, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1323 = torch.aten.view %1322, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1324 = torch.aten.mul.Scalar %1323, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1325 = torch.aten.mul.Scalar %1323, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1326 = torch.aten.mul.Scalar %1323, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1327 = torch.aten.mul.Tensor %1326, %1323 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1328 = torch.aten.add.Scalar %1327, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1329 = torch.aten.mul.Tensor %1325, %1328 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1330 = torch.aten.tanh %1329 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1331 = torch.aten.add.Scalar %1330, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1332 = torch.aten.mul.Tensor %1324, %1331 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
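[annotation] %1324 through %1332 are the tanh approximation of GELU; the constants give it away: 0.5, 7.978850e-01 ~ sqrt(2/pi), and 4.471500e-02 = 0.044715, computed in the factored form 0.79788*x*(1 + 0.044715*x^2) = sqrt(2/pi)*(x + 0.044715*x^3). Sketch (the approximate="tanh" kwarg assumes PyTorch >= 1.12):

    import math, torch

    def gelu_tanh(x):
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))), factored as in the IR
        inner = (x * math.sqrt(2.0 / math.pi)) * (1.0 + 0.044715 * x * x)
        return (x * 0.5) * (1.0 + torch.tanh(inner))

    x = torch.randn(1, 128, 4096)
    assert torch.allclose(gelu_tanh(x), torch.nn.functional.gelu(x, approximate="tanh"), atol=1e-6)

From here on the section repeats the same residual + LayerNorm + attention + GELU-MLP layer three more times (the x9, x10, x11 score multipliers below), differing only in weight operands and those layer-dependent constants.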
| %1333 = torch.aten.transpose.int %184, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1334 = torch.aten.view %1332, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1335 = torch.aten.mm %1334, %1333 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1336 = torch.aten.mul.Scalar %183, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1337 = torch.aten.add.Tensor %1336, %1335, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1338 = torch.aten.view %1337, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1339 = torch.aten.add.Tensor %1299, %1338, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1340 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1341 = torch.aten.sum.dim_IntList %1339, %1340, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1342 = torch.aten.div.Scalar %1341, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1343 = torch.aten.size.int %1339, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1344 = torch.prim.ListConstruct %1343, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1345 = torch.aten.broadcast_to %1342, %1344 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1346 = torch.aten.sub.Tensor %1339, %1345, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1347 = torch.aten.mul.Tensor %1346, %1346 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1348 = torch.aten.sum.dim_IntList %1347, %1340, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1349 = torch.aten.div.Scalar %1348, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1350 = torch.aten.add.Scalar %1349, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1351 = torch.aten.rsqrt %1350 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1352 = torch.aten.size.int %1339, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1353 = torch.prim.ListConstruct %1352, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1354 = torch.aten.broadcast_to %1351, %1353 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1355 = torch.aten.mul.Tensor %1346, %1354 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1356 = torch.aten.mul.Tensor %1355, %182 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1357 = torch.aten.add.Tensor %1356, %181, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1358 = torch.aten.transpose.int %180, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1359 = torch.aten.view %1357, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1360 = torch.aten.mm %1359, %1358 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1361 = torch.aten.mul.Scalar %179, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1362 = torch.aten.add.Tensor %1361, %1360, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1363 = torch.aten.view %1362, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %1364 = torch.aten.view %1363, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %1365 = torch.aten.slice.Tensor %1364, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1366 = torch.aten.slice.Tensor %1364, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1367 = torch.aten.slice.Tensor %1364, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1368 = torch.aten.transpose.int %1365, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1369 = torch.aten.view %1368, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1370 = torch.aten.permute %1366, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %1371 = torch.aten.view %1370, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %1372 = torch.aten.bmm %1369, %1371 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %1373 = torch.aten.mul.Scalar %1372, %float1.388890e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %1374 = torch.aten.mul.Scalar %346, %float1.111110e-01 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %1375 = torch.aten.add.Tensor %1373, %1374, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %1376 = torch.aten.view %1375, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1377 = torch.aten.mul.Scalar %1376, %int9 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1378 = torch.aten.add.Tensor %1377, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1379 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %1380 = torch.aten.maximum %1378, %1379 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_18, %indices_19 = torch.aten.max.dim %1380, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %1381 = torch.aten.sub.Tensor %1380, %values_18, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %1382 = torch.aten.exp %1381 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1383 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %1384 = torch.aten.sum.dim_IntList %1382, %1383, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %1385 = torch.aten.div.Tensor %1382, %1384 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1386 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1387 = torch.aten.empty.memory_format %1386, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %1388 = torch.valsem.aten.copy %1387, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %1389 = torch.aten.bitwise_not %1388 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %1390 = torch.aten.mul.Tensor %1385, %1389 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1391 = torch.aten.view %1390, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %1392 = torch.aten.transpose.int %1367, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1393 = torch.aten.view %1392, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1394 = torch.aten.bmm %1391, %1393 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %1395 = torch.aten.view %1394, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %1396 = torch.aten.permute %1395, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %1397 = torch.aten.clone %1396, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1398 = torch.aten.view %1397, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1399 = torch.aten.transpose.int %178, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %1400 = torch.aten.view %1398, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1401 = torch.aten.mm %1400, %1399 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1402 = torch.aten.mul.Scalar %177, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1403 = torch.aten.add.Tensor %1402, %1401, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1404 = torch.aten.view %1403, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1405 = torch.aten.add.Tensor %1339, %1404, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1406 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1407 = torch.aten.sum.dim_IntList %1405, %1406, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1408 = torch.aten.div.Scalar %1407, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1409 = torch.aten.size.int %1405, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1410 = torch.prim.ListConstruct %1409, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1411 = torch.aten.broadcast_to %1408, %1410 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1412 = torch.aten.sub.Tensor %1405, %1411, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1413 = torch.aten.mul.Tensor %1412, %1412 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1414 = torch.aten.sum.dim_IntList %1413, %1406, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1415 = torch.aten.div.Scalar %1414, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1416 = torch.aten.add.Scalar %1415, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1417 = torch.aten.rsqrt %1416 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1418 = torch.aten.size.int %1405, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1419 = torch.prim.ListConstruct %1418, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1420 = torch.aten.broadcast_to %1417, %1419 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1421 = torch.aten.mul.Tensor %1412, %1420 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1422 = torch.aten.mul.Tensor %1421, %176 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1423 = torch.aten.add.Tensor %1422, %175, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1424 = torch.aten.transpose.int %174, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1425 = torch.aten.view %1423, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1426 = torch.aten.mm %1425, %1424 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1427 = torch.aten.mul.Scalar %173, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1428 = torch.aten.add.Tensor %1427, %1426, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1429 = torch.aten.view %1428, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1430 = torch.aten.mul.Scalar %1429, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1431 = torch.aten.mul.Scalar %1429, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1432 = torch.aten.mul.Scalar %1429, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1433 = torch.aten.mul.Tensor %1432, %1429 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1434 = torch.aten.add.Scalar %1433, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1435 = torch.aten.mul.Tensor %1431, %1434 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1436 = torch.aten.tanh %1435 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1437 = torch.aten.add.Scalar %1436, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1438 = torch.aten.mul.Tensor %1430, %1437 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1439 = torch.aten.transpose.int %172, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1440 = torch.aten.view %1438, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1441 = torch.aten.mm %1440, %1439 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1442 = torch.aten.mul.Scalar %171, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1443 = torch.aten.add.Tensor %1442, %1441, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1444 = torch.aten.view %1443, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1445 = torch.aten.add.Tensor %1405, %1444, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1446 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1447 = torch.aten.sum.dim_IntList %1445, %1446, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1448 = torch.aten.div.Scalar %1447, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1449 = torch.aten.size.int %1445, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1450 = torch.prim.ListConstruct %1449, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1451 = torch.aten.broadcast_to %1448, %1450 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1452 = torch.aten.sub.Tensor %1445, %1451, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1453 = torch.aten.mul.Tensor %1452, %1452 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1454 = torch.aten.sum.dim_IntList %1453, %1446, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1455 = torch.aten.div.Scalar %1454, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1456 = torch.aten.add.Scalar %1455, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1457 = torch.aten.rsqrt %1456 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1458 = torch.aten.size.int %1445, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1459 = torch.prim.ListConstruct %1458, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1460 = torch.aten.broadcast_to %1457, %1459 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1461 = torch.aten.mul.Tensor %1452, %1460 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1462 = torch.aten.mul.Tensor %1461, %170 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1463 = torch.aten.add.Tensor %1462, %169, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1464 = torch.aten.transpose.int %168, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1465 = torch.aten.view %1463, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1466 = torch.aten.mm %1465, %1464 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1467 = torch.aten.mul.Scalar %167, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1468 = torch.aten.add.Tensor %1467, %1466, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1469 = torch.aten.view %1468, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %1470 = torch.aten.view %1469, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %1471 = torch.aten.slice.Tensor %1470, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1472 = torch.aten.slice.Tensor %1470, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1473 = torch.aten.slice.Tensor %1470, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1474 = torch.aten.transpose.int %1471, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1475 = torch.aten.view %1474, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1476 = torch.aten.permute %1472, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %1477 = torch.aten.view %1476, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %1478 = torch.aten.bmm %1475, %1477 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %1479 = torch.aten.mul.Scalar %1478, %float1.250000e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %1480 = torch.aten.mul.Scalar %346, %float1.000000e-01 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %1481 = torch.aten.add.Tensor %1479, %1480, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %1482 = torch.aten.view %1481, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1483 = torch.aten.mul.Scalar %1482, %int10 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1484 = torch.aten.add.Tensor %1483, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1485 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %1486 = torch.aten.maximum %1484, %1485 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_20, %indices_21 = torch.aten.max.dim %1486, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %1487 = torch.aten.sub.Tensor %1486, %values_20, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %1488 = torch.aten.exp %1487 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1489 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %1490 = torch.aten.sum.dim_IntList %1488, %1489, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %1491 = torch.aten.div.Tensor %1488, %1490 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1492 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1493 = torch.aten.empty.memory_format %1492, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %1494 = torch.valsem.aten.copy %1493, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %1495 = torch.aten.bitwise_not %1494 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %1496 = torch.aten.mul.Tensor %1491, %1495 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1497 = torch.aten.view %1496, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %1498 = torch.aten.transpose.int %1473, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1499 = torch.aten.view %1498, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1500 = torch.aten.bmm %1497, %1499 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %1501 = torch.aten.view %1500, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %1502 = torch.aten.permute %1501, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %1503 = torch.aten.clone %1502, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1504 = torch.aten.view %1503, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1505 = torch.aten.transpose.int %166, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %1506 = torch.aten.view %1504, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1507 = torch.aten.mm %1506, %1505 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1508 = torch.aten.mul.Scalar %165, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1509 = torch.aten.add.Tensor %1508, %1507, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1510 = torch.aten.view %1509, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1511 = torch.aten.add.Tensor %1445, %1510, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1512 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1513 = torch.aten.sum.dim_IntList %1511, %1512, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1514 = torch.aten.div.Scalar %1513, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1515 = torch.aten.size.int %1511, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1516 = torch.prim.ListConstruct %1515, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1517 = torch.aten.broadcast_to %1514, %1516 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1518 = torch.aten.sub.Tensor %1511, %1517, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1519 = torch.aten.mul.Tensor %1518, %1518 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1520 = torch.aten.sum.dim_IntList %1519, %1512, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1521 = torch.aten.div.Scalar %1520, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1522 = torch.aten.add.Scalar %1521, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1523 = torch.aten.rsqrt %1522 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1524 = torch.aten.size.int %1511, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1525 = torch.prim.ListConstruct %1524, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1526 = torch.aten.broadcast_to %1523, %1525 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1527 = torch.aten.mul.Tensor %1518, %1526 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1528 = torch.aten.mul.Tensor %1527, %164 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1529 = torch.aten.add.Tensor %1528, %163, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1530 = torch.aten.transpose.int %162, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1531 = torch.aten.view %1529, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1532 = torch.aten.mm %1531, %1530 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1533 = torch.aten.mul.Scalar %161, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1534 = torch.aten.add.Tensor %1533, %1532, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1535 = torch.aten.view %1534, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1536 = torch.aten.mul.Scalar %1535, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1537 = torch.aten.mul.Scalar %1535, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1538 = torch.aten.mul.Scalar %1535, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1539 = torch.aten.mul.Tensor %1538, %1535 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1540 = torch.aten.add.Scalar %1539, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1541 = torch.aten.mul.Tensor %1537, %1540 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1542 = torch.aten.tanh %1541 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1543 = torch.aten.add.Scalar %1542, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1544 = torch.aten.mul.Tensor %1536, %1543 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1545 = torch.aten.transpose.int %160, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1546 = torch.aten.view %1544, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1547 = torch.aten.mm %1546, %1545 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1548 = torch.aten.mul.Scalar %159, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1549 = torch.aten.add.Tensor %1548, %1547, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1550 = torch.aten.view %1549, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1551 = torch.aten.add.Tensor %1511, %1550, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1552 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1553 = torch.aten.sum.dim_IntList %1551, %1552, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1554 = torch.aten.div.Scalar %1553, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1555 = torch.aten.size.int %1551, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1556 = torch.prim.ListConstruct %1555, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1557 = torch.aten.broadcast_to %1554, %1556 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1558 = torch.aten.sub.Tensor %1551, %1557, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1559 = torch.aten.mul.Tensor %1558, %1558 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1560 = torch.aten.sum.dim_IntList %1559, %1552, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1561 = torch.aten.div.Scalar %1560, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1562 = torch.aten.add.Scalar %1561, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1563 = torch.aten.rsqrt %1562 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1564 = torch.aten.size.int %1551, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1565 = torch.prim.ListConstruct %1564, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1566 = torch.aten.broadcast_to %1563, %1565 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1567 = torch.aten.mul.Tensor %1558, %1566 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1568 = torch.aten.mul.Tensor %1567, %158 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1569 = torch.aten.add.Tensor %1568, %157, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1570 = torch.aten.transpose.int %156, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1571 = torch.aten.view %1569, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1572 = torch.aten.mm %1571, %1570 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1573 = torch.aten.mul.Scalar %155, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1574 = torch.aten.add.Tensor %1573, %1572, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1575 = torch.aten.view %1574, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %1576 = torch.aten.view %1575, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %1577 = torch.aten.slice.Tensor %1576, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1578 = torch.aten.slice.Tensor %1576, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1579 = torch.aten.slice.Tensor %1576, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1580 = torch.aten.transpose.int %1577, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1581 = torch.aten.view %1580, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1582 = torch.aten.permute %1578, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %1583 = torch.aten.view %1582, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %1584 = torch.aten.bmm %1581, %1583 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %1585 = torch.aten.mul.Scalar %1584, %float1.136360e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %1586 = torch.aten.mul.Scalar %346, %float9.090900e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %1587 = torch.aten.add.Tensor %1585, %1586, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %1588 = torch.aten.view %1587, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1589 = torch.aten.mul.Scalar %1588, %int11 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1590 = torch.aten.add.Tensor %1589, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1591 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %1592 = torch.aten.maximum %1590, %1591 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_22, %indices_23 = torch.aten.max.dim %1592, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %1593 = torch.aten.sub.Tensor %1592, %values_22, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %1594 = torch.aten.exp %1593 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1595 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %1596 = torch.aten.sum.dim_IntList %1594, %1595, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %1597 = torch.aten.div.Tensor %1594, %1596 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1598 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1599 = torch.aten.empty.memory_format %1598, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %1600 = torch.valsem.aten.copy %1599, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %1601 = torch.aten.bitwise_not %1600 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %1602 = torch.aten.mul.Tensor %1597, %1601 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1603 = torch.aten.view %1602, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %1604 = torch.aten.transpose.int %1579, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1605 = torch.aten.view %1604, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1606 = torch.aten.bmm %1603, %1605 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %1607 = torch.aten.view %1606, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %1608 = torch.aten.permute %1607, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %1609 = torch.aten.clone %1608, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1610 = torch.aten.view %1609, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1611 = torch.aten.transpose.int %154, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %1612 = torch.aten.view %1610, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1613 = torch.aten.mm %1612, %1611 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1614 = torch.aten.mul.Scalar %153, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1615 = torch.aten.add.Tensor %1614, %1613, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1616 = torch.aten.view %1615, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1617 = torch.aten.add.Tensor %1551, %1616, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1618 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1619 = torch.aten.sum.dim_IntList %1617, %1618, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1620 = torch.aten.div.Scalar %1619, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1621 = torch.aten.size.int %1617, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1622 = torch.prim.ListConstruct %1621, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1623 = torch.aten.broadcast_to %1620, %1622 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1624 = torch.aten.sub.Tensor %1617, %1623, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1625 = torch.aten.mul.Tensor %1624, %1624 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1626 = torch.aten.sum.dim_IntList %1625, %1618, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1627 = torch.aten.div.Scalar %1626, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1628 = torch.aten.add.Scalar %1627, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1629 = torch.aten.rsqrt %1628 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1630 = torch.aten.size.int %1617, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1631 = torch.prim.ListConstruct %1630, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1632 = torch.aten.broadcast_to %1629, %1631 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1633 = torch.aten.mul.Tensor %1624, %1632 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1634 = torch.aten.mul.Tensor %1633, %152 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1635 = torch.aten.add.Tensor %1634, %151, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1636 = torch.aten.transpose.int %150, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1637 = torch.aten.view %1635, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1638 = torch.aten.mm %1637, %1636 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1639 = torch.aten.mul.Scalar %149, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1640 = torch.aten.add.Tensor %1639, %1638, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1641 = torch.aten.view %1640, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1642 = torch.aten.mul.Scalar %1641, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1643 = torch.aten.mul.Scalar %1641, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1644 = torch.aten.mul.Scalar %1641, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1645 = torch.aten.mul.Tensor %1644, %1641 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1646 = torch.aten.add.Scalar %1645, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1647 = torch.aten.mul.Tensor %1643, %1646 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1648 = torch.aten.tanh %1647 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1649 = torch.aten.add.Scalar %1648, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1650 = torch.aten.mul.Tensor %1642, %1649 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
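| // %1642-%1650 match the tanh GELU approximation 0.5*x*(1 + tanh(0.79788456*(x + 0.044715*x^3))): | |
| // 7.978850e-01 ~= sqrt(2/pi) and 4.471500e-02 is the cubic coefficient, presumably the decomposition of | |
| // aten.gelu with approximate="tanh". | |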
| %1651 = torch.aten.transpose.int %148, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1652 = torch.aten.view %1650, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1653 = torch.aten.mm %1652, %1651 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1654 = torch.aten.mul.Scalar %147, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1655 = torch.aten.add.Tensor %1654, %1653, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1656 = torch.aten.view %1655, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1657 = torch.aten.add.Tensor %1617, %1656, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
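| // %1636-%1657 look like the transformer MLP: up-projection 1024->4096 (weight %150, bias %149), the GELU | |
| // above, down-projection 4096->1024 (weight %148, bias %147), then the residual add back onto %1617. | |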
| %1658 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1659 = torch.aten.sum.dim_IntList %1657, %1658, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1660 = torch.aten.div.Scalar %1659, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1661 = torch.aten.size.int %1657, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1662 = torch.prim.ListConstruct %1661, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1663 = torch.aten.broadcast_to %1660, %1662 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1664 = torch.aten.sub.Tensor %1657, %1663, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1665 = torch.aten.mul.Tensor %1664, %1664 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1666 = torch.aten.sum.dim_IntList %1665, %1658, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1667 = torch.aten.div.Scalar %1666, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1668 = torch.aten.add.Scalar %1667, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1669 = torch.aten.rsqrt %1668 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1670 = torch.aten.size.int %1657, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1671 = torch.prim.ListConstruct %1670, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1672 = torch.aten.broadcast_to %1669, %1671 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1673 = torch.aten.mul.Tensor %1664, %1672 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1674 = torch.aten.mul.Tensor %1673, %146 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1675 = torch.aten.add.Tensor %1674, %145, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
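| // %1658-%1675: the same decomposed LayerNorm pattern again, seemingly the input LayerNorm of the next | |
| // decoder block (affine weights %146/%145). | |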
| %1676 = torch.aten.transpose.int %144, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1677 = torch.aten.view %1675, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1678 = torch.aten.mm %1677, %1676 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1679 = torch.aten.mul.Scalar %143, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1680 = torch.aten.add.Tensor %1679, %1678, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1681 = torch.aten.view %1680, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %1682 = torch.aten.view %1681, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %1683 = torch.aten.slice.Tensor %1682, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1684 = torch.aten.slice.Tensor %1682, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1685 = torch.aten.slice.Tensor %1682, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
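| // %1676-%1685: fused QKV projection (1024->3072 via %144/%143), reshaped to [1,128,16,192] and sliced along | |
| // the last axis into query/key/value tensors for 16 heads of head_dim 64. | |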
| %1686 = torch.aten.transpose.int %1683, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1687 = torch.aten.view %1686, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1688 = torch.aten.permute %1684, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %1689 = torch.aten.view %1688, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %1690 = torch.aten.bmm %1687, %1689 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %1691 = torch.aten.mul.Scalar %1690, %float1.041670e-02 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %1692 = torch.aten.mul.Scalar %346, %float8.333330e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %1693 = torch.aten.add.Tensor %1691, %1692, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %1694 = torch.aten.view %1693, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1695 = torch.aten.mul.Scalar %1694, %int12 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1696 = torch.aten.add.Tensor %1695, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
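| // %1690-%1696: attention scores. The constants are consistent with BLOOM's per-layer numerical-stability | |
| // scaling, scores*(1/(sqrt(64)*n)) + alibi*(1/n) rescaled by n before adding the causal mask %372: here | |
| // 1.041670e-02 ~= 1/96 and 8.333330e-02 ~= 1/12 with n = 12 (%int12). %346 is presumably the alibi bias. | |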
| %1697 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %1698 = torch.aten.maximum %1696, %1697 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_24, %indices_25 = torch.aten.max.dim %1698, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %1699 = torch.aten.sub.Tensor %1698, %values_24, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %1700 = torch.aten.exp %1699 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1701 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %1702 = torch.aten.sum.dim_IntList %1700, %1701, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %1703 = torch.aten.div.Tensor %1700, %1702 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
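| // %1697-%1703: a numerically stable softmax, decomposed as a clamp against the scalar %287 (likely a dtype | |
| // minimum), subtraction of the keepdim row max, exp, then division by the keepdim sum along the last axis. | |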
| %1704 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1705 = torch.aten.empty.memory_format %1704, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %1706 = torch.valsem.aten.copy %1705, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %1707 = torch.aten.bitwise_not %1706 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %1708 = torch.aten.mul.Tensor %1703, %1707 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1709 = torch.aten.view %1708, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
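| // %1704-%1708: the additive mask %372 is copied into an i1 tensor, inverted with bitwise_not, and multiplied | |
| // into the probabilities, apparently zeroing attention at masked positions before the value matmul. | |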
| %1710 = torch.aten.transpose.int %1685, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1711 = torch.aten.view %1710, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1712 = torch.aten.bmm %1709, %1711 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %1713 = torch.aten.view %1712, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %1714 = torch.aten.permute %1713, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %1715 = torch.aten.clone %1714, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1716 = torch.aten.view %1715, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
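| // %1710-%1716: per-head probs @ V via bmm, then transpose, contiguous clone, and reshape back to | |
| // [1,128,1024] for the attention output projection. | |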
| %1717 = torch.aten.transpose.int %142, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %1718 = torch.aten.view %1716, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1719 = torch.aten.mm %1718, %1717 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1720 = torch.aten.mul.Scalar %141, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1721 = torch.aten.add.Tensor %1720, %1719, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1722 = torch.aten.view %1721, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1723 = torch.aten.add.Tensor %1657, %1722, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
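| // %1717-%1723: attention output projection (weight %142, bias %141) and residual add. From here the dump | |
| // repeats the same decoder-block pattern; only the weight SSA values (%140 and below) and the per-layer | |
| // constants change (int13/int14/int15 with matching reciprocals, e.g. 9.615380e-03 ~= 1/104 and | |
| // 7.692310e-02 ~= 1/13 in the next block). | |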
| %1724 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1725 = torch.aten.sum.dim_IntList %1723, %1724, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1726 = torch.aten.div.Scalar %1725, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1727 = torch.aten.size.int %1723, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1728 = torch.prim.ListConstruct %1727, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1729 = torch.aten.broadcast_to %1726, %1728 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1730 = torch.aten.sub.Tensor %1723, %1729, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1731 = torch.aten.mul.Tensor %1730, %1730 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1732 = torch.aten.sum.dim_IntList %1731, %1724, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1733 = torch.aten.div.Scalar %1732, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1734 = torch.aten.add.Scalar %1733, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1735 = torch.aten.rsqrt %1734 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1736 = torch.aten.size.int %1723, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1737 = torch.prim.ListConstruct %1736, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1738 = torch.aten.broadcast_to %1735, %1737 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1739 = torch.aten.mul.Tensor %1730, %1738 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1740 = torch.aten.mul.Tensor %1739, %140 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1741 = torch.aten.add.Tensor %1740, %139, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1742 = torch.aten.transpose.int %138, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1743 = torch.aten.view %1741, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1744 = torch.aten.mm %1743, %1742 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1745 = torch.aten.mul.Scalar %137, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1746 = torch.aten.add.Tensor %1745, %1744, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1747 = torch.aten.view %1746, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1748 = torch.aten.mul.Scalar %1747, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1749 = torch.aten.mul.Scalar %1747, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1750 = torch.aten.mul.Scalar %1747, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1751 = torch.aten.mul.Tensor %1750, %1747 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1752 = torch.aten.add.Scalar %1751, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1753 = torch.aten.mul.Tensor %1749, %1752 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1754 = torch.aten.tanh %1753 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1755 = torch.aten.add.Scalar %1754, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1756 = torch.aten.mul.Tensor %1748, %1755 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1757 = torch.aten.transpose.int %136, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1758 = torch.aten.view %1756, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1759 = torch.aten.mm %1758, %1757 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1760 = torch.aten.mul.Scalar %135, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1761 = torch.aten.add.Tensor %1760, %1759, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1762 = torch.aten.view %1761, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1763 = torch.aten.add.Tensor %1723, %1762, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1764 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1765 = torch.aten.sum.dim_IntList %1763, %1764, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1766 = torch.aten.div.Scalar %1765, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1767 = torch.aten.size.int %1763, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1768 = torch.prim.ListConstruct %1767, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1769 = torch.aten.broadcast_to %1766, %1768 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1770 = torch.aten.sub.Tensor %1763, %1769, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1771 = torch.aten.mul.Tensor %1770, %1770 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1772 = torch.aten.sum.dim_IntList %1771, %1764, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1773 = torch.aten.div.Scalar %1772, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1774 = torch.aten.add.Scalar %1773, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1775 = torch.aten.rsqrt %1774 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1776 = torch.aten.size.int %1763, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1777 = torch.prim.ListConstruct %1776, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1778 = torch.aten.broadcast_to %1775, %1777 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1779 = torch.aten.mul.Tensor %1770, %1778 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1780 = torch.aten.mul.Tensor %1779, %134 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1781 = torch.aten.add.Tensor %1780, %133, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1782 = torch.aten.transpose.int %132, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1783 = torch.aten.view %1781, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1784 = torch.aten.mm %1783, %1782 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1785 = torch.aten.mul.Scalar %131, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1786 = torch.aten.add.Tensor %1785, %1784, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1787 = torch.aten.view %1786, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %1788 = torch.aten.view %1787, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %1789 = torch.aten.slice.Tensor %1788, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1790 = torch.aten.slice.Tensor %1788, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1791 = torch.aten.slice.Tensor %1788, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1792 = torch.aten.transpose.int %1789, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1793 = torch.aten.view %1792, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1794 = torch.aten.permute %1790, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %1795 = torch.aten.view %1794, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %1796 = torch.aten.bmm %1793, %1795 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %1797 = torch.aten.mul.Scalar %1796, %float9.615380e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %1798 = torch.aten.mul.Scalar %346, %float7.692310e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %1799 = torch.aten.add.Tensor %1797, %1798, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %1800 = torch.aten.view %1799, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1801 = torch.aten.mul.Scalar %1800, %int13 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1802 = torch.aten.add.Tensor %1801, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1803 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %1804 = torch.aten.maximum %1802, %1803 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_26, %indices_27 = torch.aten.max.dim %1804, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %1805 = torch.aten.sub.Tensor %1804, %values_26, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %1806 = torch.aten.exp %1805 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1807 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %1808 = torch.aten.sum.dim_IntList %1806, %1807, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %1809 = torch.aten.div.Tensor %1806, %1808 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1810 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1811 = torch.aten.empty.memory_format %1810, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %1812 = torch.valsem.aten.copy %1811, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %1813 = torch.aten.bitwise_not %1812 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %1814 = torch.aten.mul.Tensor %1809, %1813 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1815 = torch.aten.view %1814, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %1816 = torch.aten.transpose.int %1791, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1817 = torch.aten.view %1816, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1818 = torch.aten.bmm %1815, %1817 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %1819 = torch.aten.view %1818, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %1820 = torch.aten.permute %1819, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %1821 = torch.aten.clone %1820, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1822 = torch.aten.view %1821, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1823 = torch.aten.transpose.int %130, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %1824 = torch.aten.view %1822, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1825 = torch.aten.mm %1824, %1823 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1826 = torch.aten.mul.Scalar %129, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1827 = torch.aten.add.Tensor %1826, %1825, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1828 = torch.aten.view %1827, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1829 = torch.aten.add.Tensor %1763, %1828, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1830 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1831 = torch.aten.sum.dim_IntList %1829, %1830, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1832 = torch.aten.div.Scalar %1831, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1833 = torch.aten.size.int %1829, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1834 = torch.prim.ListConstruct %1833, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1835 = torch.aten.broadcast_to %1832, %1834 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1836 = torch.aten.sub.Tensor %1829, %1835, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1837 = torch.aten.mul.Tensor %1836, %1836 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1838 = torch.aten.sum.dim_IntList %1837, %1830, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1839 = torch.aten.div.Scalar %1838, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1840 = torch.aten.add.Scalar %1839, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1841 = torch.aten.rsqrt %1840 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1842 = torch.aten.size.int %1829, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1843 = torch.prim.ListConstruct %1842, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1844 = torch.aten.broadcast_to %1841, %1843 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1845 = torch.aten.mul.Tensor %1836, %1844 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1846 = torch.aten.mul.Tensor %1845, %128 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1847 = torch.aten.add.Tensor %1846, %127, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1848 = torch.aten.transpose.int %126, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1849 = torch.aten.view %1847, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1850 = torch.aten.mm %1849, %1848 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1851 = torch.aten.mul.Scalar %125, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1852 = torch.aten.add.Tensor %1851, %1850, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1853 = torch.aten.view %1852, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1854 = torch.aten.mul.Scalar %1853, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1855 = torch.aten.mul.Scalar %1853, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1856 = torch.aten.mul.Scalar %1853, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1857 = torch.aten.mul.Tensor %1856, %1853 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1858 = torch.aten.add.Scalar %1857, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1859 = torch.aten.mul.Tensor %1855, %1858 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1860 = torch.aten.tanh %1859 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1861 = torch.aten.add.Scalar %1860, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1862 = torch.aten.mul.Tensor %1854, %1861 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1863 = torch.aten.transpose.int %124, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1864 = torch.aten.view %1862, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1865 = torch.aten.mm %1864, %1863 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1866 = torch.aten.mul.Scalar %123, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1867 = torch.aten.add.Tensor %1866, %1865, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1868 = torch.aten.view %1867, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1869 = torch.aten.add.Tensor %1829, %1868, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1870 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1871 = torch.aten.sum.dim_IntList %1869, %1870, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1872 = torch.aten.div.Scalar %1871, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1873 = torch.aten.size.int %1869, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1874 = torch.prim.ListConstruct %1873, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1875 = torch.aten.broadcast_to %1872, %1874 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1876 = torch.aten.sub.Tensor %1869, %1875, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1877 = torch.aten.mul.Tensor %1876, %1876 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1878 = torch.aten.sum.dim_IntList %1877, %1870, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1879 = torch.aten.div.Scalar %1878, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1880 = torch.aten.add.Scalar %1879, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1881 = torch.aten.rsqrt %1880 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1882 = torch.aten.size.int %1869, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1883 = torch.prim.ListConstruct %1882, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1884 = torch.aten.broadcast_to %1881, %1883 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1885 = torch.aten.mul.Tensor %1876, %1884 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1886 = torch.aten.mul.Tensor %1885, %122 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1887 = torch.aten.add.Tensor %1886, %121, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1888 = torch.aten.transpose.int %120, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1889 = torch.aten.view %1887, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1890 = torch.aten.mm %1889, %1888 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1891 = torch.aten.mul.Scalar %119, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1892 = torch.aten.add.Tensor %1891, %1890, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1893 = torch.aten.view %1892, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %1894 = torch.aten.view %1893, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %1895 = torch.aten.slice.Tensor %1894, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1896 = torch.aten.slice.Tensor %1894, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1897 = torch.aten.slice.Tensor %1894, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1898 = torch.aten.transpose.int %1895, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1899 = torch.aten.view %1898, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1900 = torch.aten.permute %1896, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %1901 = torch.aten.view %1900, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %1902 = torch.aten.bmm %1899, %1901 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %1903 = torch.aten.mul.Scalar %1902, %float8.928570e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %1904 = torch.aten.mul.Scalar %346, %float7.142860e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %1905 = torch.aten.add.Tensor %1903, %1904, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %1906 = torch.aten.view %1905, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1907 = torch.aten.mul.Scalar %1906, %int14 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1908 = torch.aten.add.Tensor %1907, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %1909 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %1910 = torch.aten.maximum %1908, %1909 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_28, %indices_29 = torch.aten.max.dim %1910, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %1911 = torch.aten.sub.Tensor %1910, %values_28, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %1912 = torch.aten.exp %1911 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1913 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %1914 = torch.aten.sum.dim_IntList %1912, %1913, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %1915 = torch.aten.div.Tensor %1912, %1914 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1916 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1917 = torch.aten.empty.memory_format %1916, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %1918 = torch.valsem.aten.copy %1917, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %1919 = torch.aten.bitwise_not %1918 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %1920 = torch.aten.mul.Tensor %1915, %1919 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %1921 = torch.aten.view %1920, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %1922 = torch.aten.transpose.int %1897, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %1923 = torch.aten.view %1922, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %1924 = torch.aten.bmm %1921, %1923 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %1925 = torch.aten.view %1924, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %1926 = torch.aten.permute %1925, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %1927 = torch.aten.clone %1926, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %1928 = torch.aten.view %1927, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1929 = torch.aten.transpose.int %118, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %1930 = torch.aten.view %1928, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1931 = torch.aten.mm %1930, %1929 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1932 = torch.aten.mul.Scalar %117, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1933 = torch.aten.add.Tensor %1932, %1931, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1934 = torch.aten.view %1933, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1935 = torch.aten.add.Tensor %1869, %1934, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1936 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1937 = torch.aten.sum.dim_IntList %1935, %1936, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1938 = torch.aten.div.Scalar %1937, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1939 = torch.aten.size.int %1935, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1940 = torch.prim.ListConstruct %1939, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1941 = torch.aten.broadcast_to %1938, %1940 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1942 = torch.aten.sub.Tensor %1935, %1941, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1943 = torch.aten.mul.Tensor %1942, %1942 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1944 = torch.aten.sum.dim_IntList %1943, %1936, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1945 = torch.aten.div.Scalar %1944, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1946 = torch.aten.add.Scalar %1945, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1947 = torch.aten.rsqrt %1946 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1948 = torch.aten.size.int %1935, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1949 = torch.prim.ListConstruct %1948, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1950 = torch.aten.broadcast_to %1947, %1949 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1951 = torch.aten.mul.Tensor %1942, %1950 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1952 = torch.aten.mul.Tensor %1951, %116 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1953 = torch.aten.add.Tensor %1952, %115, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1954 = torch.aten.transpose.int %114, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %1955 = torch.aten.view %1953, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1956 = torch.aten.mm %1955, %1954 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %1957 = torch.aten.mul.Scalar %113, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %1958 = torch.aten.add.Tensor %1957, %1956, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %1959 = torch.aten.view %1958, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %1960 = torch.aten.mul.Scalar %1959, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1961 = torch.aten.mul.Scalar %1959, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1962 = torch.aten.mul.Scalar %1959, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %1963 = torch.aten.mul.Tensor %1962, %1959 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1964 = torch.aten.add.Scalar %1963, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1965 = torch.aten.mul.Tensor %1961, %1964 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1966 = torch.aten.tanh %1965 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1967 = torch.aten.add.Scalar %1966, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %1968 = torch.aten.mul.Tensor %1960, %1967 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %1969 = torch.aten.transpose.int %112, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %1970 = torch.aten.view %1968, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %1971 = torch.aten.mm %1970, %1969 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %1972 = torch.aten.mul.Scalar %111, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %1973 = torch.aten.add.Tensor %1972, %1971, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %1974 = torch.aten.view %1973, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %1975 = torch.aten.add.Tensor %1935, %1974, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1976 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %1977 = torch.aten.sum.dim_IntList %1975, %1976, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1978 = torch.aten.div.Scalar %1977, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1979 = torch.aten.size.int %1975, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1980 = torch.prim.ListConstruct %1979, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1981 = torch.aten.broadcast_to %1978, %1980 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1982 = torch.aten.sub.Tensor %1975, %1981, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1983 = torch.aten.mul.Tensor %1982, %1982 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1984 = torch.aten.sum.dim_IntList %1983, %1976, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %1985 = torch.aten.div.Scalar %1984, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1986 = torch.aten.add.Scalar %1985, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %1987 = torch.aten.rsqrt %1986 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %1988 = torch.aten.size.int %1975, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %1989 = torch.prim.ListConstruct %1988, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %1990 = torch.aten.broadcast_to %1987, %1989 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %1991 = torch.aten.mul.Tensor %1982, %1990 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1992 = torch.aten.mul.Tensor %1991, %110 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %1993 = torch.aten.add.Tensor %1992, %109, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %1994 = torch.aten.transpose.int %108, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %1995 = torch.aten.view %1993, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %1996 = torch.aten.mm %1995, %1994 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %1997 = torch.aten.mul.Scalar %107, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %1998 = torch.aten.add.Tensor %1997, %1996, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %1999 = torch.aten.view %1998, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %2000 = torch.aten.view %1999, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %2001 = torch.aten.slice.Tensor %2000, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2002 = torch.aten.slice.Tensor %2000, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2003 = torch.aten.slice.Tensor %2000, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2004 = torch.aten.transpose.int %2001, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2005 = torch.aten.view %2004, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2006 = torch.aten.permute %2002, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %2007 = torch.aten.view %2006, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %2008 = torch.aten.bmm %2005, %2007 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %2009 = torch.aten.mul.Scalar %2008, %float8.333330e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %2010 = torch.aten.mul.Scalar %346, %float6.666660e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %2011 = torch.aten.add.Tensor %2009, %2010, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %2012 = torch.aten.view %2011, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2013 = torch.aten.mul.Scalar %2012, %int15 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2014 = torch.aten.add.Tensor %2013, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2015 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %2016 = torch.aten.maximum %2014, %2015 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_30, %indices_31 = torch.aten.max.dim %2016, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %2017 = torch.aten.sub.Tensor %2016, %values_30, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %2018 = torch.aten.exp %2017 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2019 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2020 = torch.aten.sum.dim_IntList %2018, %2019, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %2021 = torch.aten.div.Tensor %2018, %2020 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2022 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2023 = torch.aten.empty.memory_format %2022, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %2024 = torch.valsem.aten.copy %2023, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %2025 = torch.aten.bitwise_not %2024 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %2026 = torch.aten.mul.Tensor %2021, %2025 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2027 = torch.aten.view %2026, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %2028 = torch.aten.transpose.int %2003, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2029 = torch.aten.view %2028, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2030 = torch.aten.bmm %2027, %2029 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %2031 = torch.aten.view %2030, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %2032 = torch.aten.permute %2031, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %2033 = torch.aten.clone %2032, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2034 = torch.aten.view %2033, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2035 = torch.aten.transpose.int %106, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %2036 = torch.aten.view %2034, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2037 = torch.aten.mm %2036, %2035 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2038 = torch.aten.mul.Scalar %105, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2039 = torch.aten.add.Tensor %2038, %2037, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2040 = torch.aten.view %2039, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
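
Note: %2035 through %2040 are a linear layer decomposed from aten.addmm: transpose the (out, in) weight, view the input to 2-D, aten.mm, then add the bias (the aten.mul.Scalar of %105 by %int1 is the beta scaling left over from the addmm decomposition). As a sketch, this is equivalent to torch.nn.functional.linear; the tensors below are stand-ins:

    import torch

    x = torch.rand(1, 128, 1024)
    w = torch.rand(1024, 1024)                 # stored (out_features, in_features)
    b = torch.rand(1024)

    y = torch.mm(x.view(128, 1024), w.t())     # transpose.int + view + mm
    y = (b * 1 + y).view(1, 128, 1024)         # beta*bias + result, reshape back
    # same result as torch.nn.functional.linear(x, w, b)
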
| %2041 = torch.aten.add.Tensor %1975, %2040, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2042 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2043 = torch.aten.sum.dim_IntList %2041, %2042, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2044 = torch.aten.div.Scalar %2043, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2045 = torch.aten.size.int %2041, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2046 = torch.prim.ListConstruct %2045, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2047 = torch.aten.broadcast_to %2044, %2046 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2048 = torch.aten.sub.Tensor %2041, %2047, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2049 = torch.aten.mul.Tensor %2048, %2048 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2050 = torch.aten.sum.dim_IntList %2049, %2042, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2051 = torch.aten.div.Scalar %2050, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2052 = torch.aten.add.Scalar %2051, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2053 = torch.aten.rsqrt %2052 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2054 = torch.aten.size.int %2041, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2055 = torch.prim.ListConstruct %2054, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2056 = torch.aten.broadcast_to %2053, %2055 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2057 = torch.aten.mul.Tensor %2048, %2056 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2058 = torch.aten.mul.Tensor %2057, %104 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2059 = torch.aten.add.Tensor %2058, %103, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
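
Note: %2042 through %2059 are LayerNorm over the last (1024-wide) dimension, fully decomposed: mean via sum/1024, center, variance via sum of squares/1024, add eps 1.0e-05, rsqrt, then scale (%104) and shift (%103). The two aten.size.int / broadcast_to pairs only re-expand the (?,128,1) statistics back to the input shape. Sketch of the same arithmetic:

    import torch

    def layer_norm_decomposed(x, weight, bias, eps=1e-5):
        mean = x.sum(dim=2, keepdim=True) / 1024           # sum.dim_IntList + div.Scalar
        centered = x - mean.expand_as(x)                   # broadcast_to + sub.Tensor
        var = (centered * centered).sum(dim=2, keepdim=True) / 1024
        inv_std = torch.rsqrt(var + eps)                   # add.Scalar + rsqrt
        return centered * inv_std.expand_as(x) * weight + bias

    y = layer_norm_decomposed(torch.rand(2, 128, 1024), torch.ones(1024), torch.zeros(1024))
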
| %2060 = torch.aten.transpose.int %102, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %2061 = torch.aten.view %2059, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2062 = torch.aten.mm %2061, %2060 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %2063 = torch.aten.mul.Scalar %101, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %2064 = torch.aten.add.Tensor %2063, %2062, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %2065 = torch.aten.view %2064, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %2066 = torch.aten.mul.Scalar %2065, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2067 = torch.aten.mul.Scalar %2065, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2068 = torch.aten.mul.Scalar %2065, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2069 = torch.aten.mul.Tensor %2068, %2065 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2070 = torch.aten.add.Scalar %2069, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2071 = torch.aten.mul.Tensor %2067, %2070 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2072 = torch.aten.tanh %2071 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2073 = torch.aten.add.Scalar %2072, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2074 = torch.aten.mul.Tensor %2066, %2073 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
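
Note: %2066 through %2074 are the tanh approximation of GELU, decomposed into its constant factors: 0.5*x, 0.7978850*x (sqrt(2/pi)), 0.044715*x*x + 1, multiply, tanh, +1, multiply. Sketch of the closed form these ops compute:

    import math
    import torch

    def gelu_tanh(x: torch.Tensor) -> torch.Tensor:
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * x * (1 + 0.044715 * x*x)))
        inner = math.sqrt(2.0 / math.pi) * x * (1.0 + 0.044715 * x * x)
        return 0.5 * x * (1.0 + torch.tanh(inner))

    y = gelu_tanh(torch.rand(1, 128, 4096))
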
| %2075 = torch.aten.transpose.int %100, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %2076 = torch.aten.view %2074, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %2077 = torch.aten.mm %2076, %2075 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2078 = torch.aten.mul.Scalar %99, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2079 = torch.aten.add.Tensor %2078, %2077, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2080 = torch.aten.view %2079, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2081 = torch.aten.add.Tensor %2041, %2080, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2082 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2083 = torch.aten.sum.dim_IntList %2081, %2082, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2084 = torch.aten.div.Scalar %2083, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2085 = torch.aten.size.int %2081, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2086 = torch.prim.ListConstruct %2085, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2087 = torch.aten.broadcast_to %2084, %2086 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2088 = torch.aten.sub.Tensor %2081, %2087, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2089 = torch.aten.mul.Tensor %2088, %2088 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2090 = torch.aten.sum.dim_IntList %2089, %2082, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2091 = torch.aten.div.Scalar %2090, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2092 = torch.aten.add.Scalar %2091, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2093 = torch.aten.rsqrt %2092 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2094 = torch.aten.size.int %2081, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2095 = torch.prim.ListConstruct %2094, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2096 = torch.aten.broadcast_to %2093, %2095 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2097 = torch.aten.mul.Tensor %2088, %2096 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2098 = torch.aten.mul.Tensor %2097, %98 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2099 = torch.aten.add.Tensor %2098, %97, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2100 = torch.aten.transpose.int %96, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %2101 = torch.aten.view %2099, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2102 = torch.aten.mm %2101, %2100 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %2103 = torch.aten.mul.Scalar %95, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %2104 = torch.aten.add.Tensor %2103, %2102, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %2105 = torch.aten.view %2104, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %2106 = torch.aten.view %2105, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %2107 = torch.aten.slice.Tensor %2106, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2108 = torch.aten.slice.Tensor %2106, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2109 = torch.aten.slice.Tensor %2106, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
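
Note: %2100 through %2109 are the fused QKV projection: a single (3072, 1024) weight (%96) produces a (1, 128, 16, 192) tensor that aten.slice.Tensor splits along the last dimension into 64-wide query, key, and value heads. Sketch with stand-in weights:

    import torch

    hidden = torch.rand(1, 128, 1024)
    w_qkv = torch.rand(3072, 1024)             # fused Q/K/V weight
    b_qkv = torch.rand(3072)

    fused = (hidden.view(128, 1024) @ w_qkv.t() + b_qkv).view(1, 128, 16, 192)
    q = fused[..., 0:64]                       # aten.slice.Tensor, dim = -1
    k = fused[..., 64:128]
    v = fused[..., 128:192]
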
| %2110 = torch.aten.transpose.int %2107, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2111 = torch.aten.view %2110, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2112 = torch.aten.permute %2108, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %2113 = torch.aten.view %2112, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %2114 = torch.aten.bmm %2111, %2113 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %2115 = torch.aten.mul.Scalar %2114, %float7.812500e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %2116 = torch.aten.mul.Scalar %346, %float6.250000e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %2117 = torch.aten.add.Tensor %2115, %2116, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %2118 = torch.aten.view %2117, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2119 = torch.aten.mul.Scalar %2118, %int16 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2120 = torch.aten.add.Tensor %2119, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
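
Note: the constants in %2115 through %2120 look like BLOOM/Megatron's scaled-masked-softmax trick: the raw scores and the alibi bias %346 are divided by the layer number (here 16) on top of the usual 1/sqrt(head_dim) factor, and the aten.mul.Scalar by %int16 multiplies the layer number back in just before the softmax. The later layers in this dump repeat the pattern with 17, 18, and 19 (the %float7.352940e-03, %float6.944440e-03, and %float6.578940e-03 blocks below), so the same reading applies there. Quick arithmetic check, assuming head_dim = 64:

    import math

    head_dim = 64
    for layer_number in (16, 17, 18, 19):
        inv_norm = 1.0 / math.sqrt(head_dim)              # 1/8
        print(inv_norm / layer_number, 1.0 / layer_number)
    # 0.0078125   0.0625      -> %float7.812500e-03, %float6.250000e-02
    # 0.0073529.. 0.0588235.. -> %float7.352940e-03, %float5.882350e-02
    # 0.0069444.. 0.0555556.. -> %float6.944440e-03, %float5.555560e-02
    # 0.0065789.. 0.0526316.. -> %float6.578940e-03, %float5.263160e-02
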
| %2121 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %2122 = torch.aten.maximum %2120, %2121 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_32, %indices_33 = torch.aten.max.dim %2122, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %2123 = torch.aten.sub.Tensor %2122, %values_32, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %2124 = torch.aten.exp %2123 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2125 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2126 = torch.aten.sum.dim_IntList %2124, %2125, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %2127 = torch.aten.div.Tensor %2124, %2126 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2128 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2129 = torch.aten.empty.memory_format %2128, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %2130 = torch.valsem.aten.copy %2129, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %2131 = torch.aten.bitwise_not %2130 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %2132 = torch.aten.mul.Tensor %2127, %2131 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2133 = torch.aten.view %2132, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %2134 = torch.aten.transpose.int %2109, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2135 = torch.aten.view %2134, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2136 = torch.aten.bmm %2133, %2135 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %2137 = torch.aten.view %2136, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %2138 = torch.aten.permute %2137, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %2139 = torch.aten.clone %2138, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2140 = torch.aten.view %2139, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2141 = torch.aten.transpose.int %94, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %2142 = torch.aten.view %2140, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2143 = torch.aten.mm %2142, %2141 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2144 = torch.aten.mul.Scalar %93, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2145 = torch.aten.add.Tensor %2144, %2143, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2146 = torch.aten.view %2145, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2147 = torch.aten.add.Tensor %2081, %2146, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2148 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2149 = torch.aten.sum.dim_IntList %2147, %2148, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2150 = torch.aten.div.Scalar %2149, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2151 = torch.aten.size.int %2147, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2152 = torch.prim.ListConstruct %2151, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2153 = torch.aten.broadcast_to %2150, %2152 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2154 = torch.aten.sub.Tensor %2147, %2153, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2155 = torch.aten.mul.Tensor %2154, %2154 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2156 = torch.aten.sum.dim_IntList %2155, %2148, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2157 = torch.aten.div.Scalar %2156, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2158 = torch.aten.add.Scalar %2157, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2159 = torch.aten.rsqrt %2158 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2160 = torch.aten.size.int %2147, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2161 = torch.prim.ListConstruct %2160, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2162 = torch.aten.broadcast_to %2159, %2161 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2163 = torch.aten.mul.Tensor %2154, %2162 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2164 = torch.aten.mul.Tensor %2163, %92 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2165 = torch.aten.add.Tensor %2164, %91, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2166 = torch.aten.transpose.int %90, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %2167 = torch.aten.view %2165, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2168 = torch.aten.mm %2167, %2166 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %2169 = torch.aten.mul.Scalar %89, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %2170 = torch.aten.add.Tensor %2169, %2168, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %2171 = torch.aten.view %2170, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %2172 = torch.aten.mul.Scalar %2171, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2173 = torch.aten.mul.Scalar %2171, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2174 = torch.aten.mul.Scalar %2171, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2175 = torch.aten.mul.Tensor %2174, %2171 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2176 = torch.aten.add.Scalar %2175, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2177 = torch.aten.mul.Tensor %2173, %2176 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2178 = torch.aten.tanh %2177 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2179 = torch.aten.add.Scalar %2178, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2180 = torch.aten.mul.Tensor %2172, %2179 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2181 = torch.aten.transpose.int %88, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %2182 = torch.aten.view %2180, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %2183 = torch.aten.mm %2182, %2181 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2184 = torch.aten.mul.Scalar %87, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2185 = torch.aten.add.Tensor %2184, %2183, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2186 = torch.aten.view %2185, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2187 = torch.aten.add.Tensor %2147, %2186, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2188 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2189 = torch.aten.sum.dim_IntList %2187, %2188, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2190 = torch.aten.div.Scalar %2189, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2191 = torch.aten.size.int %2187, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2192 = torch.prim.ListConstruct %2191, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2193 = torch.aten.broadcast_to %2190, %2192 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2194 = torch.aten.sub.Tensor %2187, %2193, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2195 = torch.aten.mul.Tensor %2194, %2194 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2196 = torch.aten.sum.dim_IntList %2195, %2188, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2197 = torch.aten.div.Scalar %2196, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2198 = torch.aten.add.Scalar %2197, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2199 = torch.aten.rsqrt %2198 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2200 = torch.aten.size.int %2187, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2201 = torch.prim.ListConstruct %2200, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2202 = torch.aten.broadcast_to %2199, %2201 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2203 = torch.aten.mul.Tensor %2194, %2202 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2204 = torch.aten.mul.Tensor %2203, %86 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2205 = torch.aten.add.Tensor %2204, %85, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2206 = torch.aten.transpose.int %84, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %2207 = torch.aten.view %2205, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2208 = torch.aten.mm %2207, %2206 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %2209 = torch.aten.mul.Scalar %83, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %2210 = torch.aten.add.Tensor %2209, %2208, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %2211 = torch.aten.view %2210, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %2212 = torch.aten.view %2211, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %2213 = torch.aten.slice.Tensor %2212, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2214 = torch.aten.slice.Tensor %2212, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2215 = torch.aten.slice.Tensor %2212, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2216 = torch.aten.transpose.int %2213, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2217 = torch.aten.view %2216, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2218 = torch.aten.permute %2214, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %2219 = torch.aten.view %2218, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %2220 = torch.aten.bmm %2217, %2219 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %2221 = torch.aten.mul.Scalar %2220, %float7.352940e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %2222 = torch.aten.mul.Scalar %346, %float5.882350e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %2223 = torch.aten.add.Tensor %2221, %2222, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %2224 = torch.aten.view %2223, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2225 = torch.aten.mul.Scalar %2224, %int17 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2226 = torch.aten.add.Tensor %2225, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2227 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %2228 = torch.aten.maximum %2226, %2227 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_34, %indices_35 = torch.aten.max.dim %2228, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %2229 = torch.aten.sub.Tensor %2228, %values_34, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %2230 = torch.aten.exp %2229 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2231 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2232 = torch.aten.sum.dim_IntList %2230, %2231, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %2233 = torch.aten.div.Tensor %2230, %2232 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2234 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2235 = torch.aten.empty.memory_format %2234, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %2236 = torch.valsem.aten.copy %2235, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %2237 = torch.aten.bitwise_not %2236 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %2238 = torch.aten.mul.Tensor %2233, %2237 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2239 = torch.aten.view %2238, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %2240 = torch.aten.transpose.int %2215, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2241 = torch.aten.view %2240, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2242 = torch.aten.bmm %2239, %2241 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %2243 = torch.aten.view %2242, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %2244 = torch.aten.permute %2243, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %2245 = torch.aten.clone %2244, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2246 = torch.aten.view %2245, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2247 = torch.aten.transpose.int %82, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %2248 = torch.aten.view %2246, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2249 = torch.aten.mm %2248, %2247 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2250 = torch.aten.mul.Scalar %81, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2251 = torch.aten.add.Tensor %2250, %2249, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2252 = torch.aten.view %2251, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2253 = torch.aten.add.Tensor %2187, %2252, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2254 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2255 = torch.aten.sum.dim_IntList %2253, %2254, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2256 = torch.aten.div.Scalar %2255, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2257 = torch.aten.size.int %2253, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2258 = torch.prim.ListConstruct %2257, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2259 = torch.aten.broadcast_to %2256, %2258 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2260 = torch.aten.sub.Tensor %2253, %2259, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2261 = torch.aten.mul.Tensor %2260, %2260 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2262 = torch.aten.sum.dim_IntList %2261, %2254, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2263 = torch.aten.div.Scalar %2262, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2264 = torch.aten.add.Scalar %2263, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2265 = torch.aten.rsqrt %2264 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2266 = torch.aten.size.int %2253, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2267 = torch.prim.ListConstruct %2266, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2268 = torch.aten.broadcast_to %2265, %2267 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2269 = torch.aten.mul.Tensor %2260, %2268 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2270 = torch.aten.mul.Tensor %2269, %80 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2271 = torch.aten.add.Tensor %2270, %79, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2272 = torch.aten.transpose.int %78, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %2273 = torch.aten.view %2271, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2274 = torch.aten.mm %2273, %2272 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %2275 = torch.aten.mul.Scalar %77, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %2276 = torch.aten.add.Tensor %2275, %2274, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %2277 = torch.aten.view %2276, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %2278 = torch.aten.mul.Scalar %2277, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2279 = torch.aten.mul.Scalar %2277, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2280 = torch.aten.mul.Scalar %2277, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2281 = torch.aten.mul.Tensor %2280, %2277 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2282 = torch.aten.add.Scalar %2281, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2283 = torch.aten.mul.Tensor %2279, %2282 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2284 = torch.aten.tanh %2283 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2285 = torch.aten.add.Scalar %2284, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2286 = torch.aten.mul.Tensor %2278, %2285 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2287 = torch.aten.transpose.int %76, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %2288 = torch.aten.view %2286, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %2289 = torch.aten.mm %2288, %2287 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2290 = torch.aten.mul.Scalar %75, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2291 = torch.aten.add.Tensor %2290, %2289, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2292 = torch.aten.view %2291, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2293 = torch.aten.add.Tensor %2253, %2292, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2294 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2295 = torch.aten.sum.dim_IntList %2293, %2294, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2296 = torch.aten.div.Scalar %2295, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2297 = torch.aten.size.int %2293, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2298 = torch.prim.ListConstruct %2297, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2299 = torch.aten.broadcast_to %2296, %2298 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2300 = torch.aten.sub.Tensor %2293, %2299, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2301 = torch.aten.mul.Tensor %2300, %2300 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2302 = torch.aten.sum.dim_IntList %2301, %2294, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2303 = torch.aten.div.Scalar %2302, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2304 = torch.aten.add.Scalar %2303, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2305 = torch.aten.rsqrt %2304 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2306 = torch.aten.size.int %2293, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2307 = torch.prim.ListConstruct %2306, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2308 = torch.aten.broadcast_to %2305, %2307 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2309 = torch.aten.mul.Tensor %2300, %2308 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2310 = torch.aten.mul.Tensor %2309, %74 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2311 = torch.aten.add.Tensor %2310, %73, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2312 = torch.aten.transpose.int %72, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %2313 = torch.aten.view %2311, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2314 = torch.aten.mm %2313, %2312 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %2315 = torch.aten.mul.Scalar %71, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %2316 = torch.aten.add.Tensor %2315, %2314, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %2317 = torch.aten.view %2316, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %2318 = torch.aten.view %2317, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %2319 = torch.aten.slice.Tensor %2318, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2320 = torch.aten.slice.Tensor %2318, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2321 = torch.aten.slice.Tensor %2318, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2322 = torch.aten.transpose.int %2319, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2323 = torch.aten.view %2322, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2324 = torch.aten.permute %2320, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %2325 = torch.aten.view %2324, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %2326 = torch.aten.bmm %2323, %2325 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %2327 = torch.aten.mul.Scalar %2326, %float6.944440e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %2328 = torch.aten.mul.Scalar %346, %float5.555560e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %2329 = torch.aten.add.Tensor %2327, %2328, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %2330 = torch.aten.view %2329, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2331 = torch.aten.mul.Scalar %2330, %int18 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2332 = torch.aten.add.Tensor %2331, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2333 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %2334 = torch.aten.maximum %2332, %2333 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_36, %indices_37 = torch.aten.max.dim %2334, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %2335 = torch.aten.sub.Tensor %2334, %values_36, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %2336 = torch.aten.exp %2335 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2337 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2338 = torch.aten.sum.dim_IntList %2336, %2337, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %2339 = torch.aten.div.Tensor %2336, %2338 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2340 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2341 = torch.aten.empty.memory_format %2340, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %2342 = torch.valsem.aten.copy %2341, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %2343 = torch.aten.bitwise_not %2342 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %2344 = torch.aten.mul.Tensor %2339, %2343 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2345 = torch.aten.view %2344, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %2346 = torch.aten.transpose.int %2321, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2347 = torch.aten.view %2346, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2348 = torch.aten.bmm %2345, %2347 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %2349 = torch.aten.view %2348, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %2350 = torch.aten.permute %2349, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %2351 = torch.aten.clone %2350, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2352 = torch.aten.view %2351, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2353 = torch.aten.transpose.int %70, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %2354 = torch.aten.view %2352, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2355 = torch.aten.mm %2354, %2353 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2356 = torch.aten.mul.Scalar %69, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2357 = torch.aten.add.Tensor %2356, %2355, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2358 = torch.aten.view %2357, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2359 = torch.aten.add.Tensor %2293, %2358, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2360 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2361 = torch.aten.sum.dim_IntList %2359, %2360, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2362 = torch.aten.div.Scalar %2361, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2363 = torch.aten.size.int %2359, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2364 = torch.prim.ListConstruct %2363, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2365 = torch.aten.broadcast_to %2362, %2364 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2366 = torch.aten.sub.Tensor %2359, %2365, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2367 = torch.aten.mul.Tensor %2366, %2366 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2368 = torch.aten.sum.dim_IntList %2367, %2360, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2369 = torch.aten.div.Scalar %2368, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2370 = torch.aten.add.Scalar %2369, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2371 = torch.aten.rsqrt %2370 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2372 = torch.aten.size.int %2359, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2373 = torch.prim.ListConstruct %2372, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2374 = torch.aten.broadcast_to %2371, %2373 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2375 = torch.aten.mul.Tensor %2366, %2374 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2376 = torch.aten.mul.Tensor %2375, %68 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2377 = torch.aten.add.Tensor %2376, %67, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2378 = torch.aten.transpose.int %66, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %2379 = torch.aten.view %2377, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2380 = torch.aten.mm %2379, %2378 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %2381 = torch.aten.mul.Scalar %65, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %2382 = torch.aten.add.Tensor %2381, %2380, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %2383 = torch.aten.view %2382, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %2384 = torch.aten.mul.Scalar %2383, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2385 = torch.aten.mul.Scalar %2383, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2386 = torch.aten.mul.Scalar %2383, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2387 = torch.aten.mul.Tensor %2386, %2383 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2388 = torch.aten.add.Scalar %2387, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2389 = torch.aten.mul.Tensor %2385, %2388 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2390 = torch.aten.tanh %2389 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2391 = torch.aten.add.Scalar %2390, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2392 = torch.aten.mul.Tensor %2384, %2391 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2393 = torch.aten.transpose.int %64, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %2394 = torch.aten.view %2392, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %2395 = torch.aten.mm %2394, %2393 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2396 = torch.aten.mul.Scalar %63, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2397 = torch.aten.add.Tensor %2396, %2395, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2398 = torch.aten.view %2397, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2399 = torch.aten.add.Tensor %2359, %2398, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2400 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2401 = torch.aten.sum.dim_IntList %2399, %2400, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2402 = torch.aten.div.Scalar %2401, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2403 = torch.aten.size.int %2399, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2404 = torch.prim.ListConstruct %2403, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2405 = torch.aten.broadcast_to %2402, %2404 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2406 = torch.aten.sub.Tensor %2399, %2405, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2407 = torch.aten.mul.Tensor %2406, %2406 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2408 = torch.aten.sum.dim_IntList %2407, %2400, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2409 = torch.aten.div.Scalar %2408, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2410 = torch.aten.add.Scalar %2409, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2411 = torch.aten.rsqrt %2410 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2412 = torch.aten.size.int %2399, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2413 = torch.prim.ListConstruct %2412, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2414 = torch.aten.broadcast_to %2411, %2413 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2415 = torch.aten.mul.Tensor %2406, %2414 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2416 = torch.aten.mul.Tensor %2415, %62 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2417 = torch.aten.add.Tensor %2416, %61, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2418 = torch.aten.transpose.int %60, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %2419 = torch.aten.view %2417, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2420 = torch.aten.mm %2419, %2418 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %2421 = torch.aten.mul.Scalar %59, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %2422 = torch.aten.add.Tensor %2421, %2420, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %2423 = torch.aten.view %2422, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %2424 = torch.aten.view %2423, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %2425 = torch.aten.slice.Tensor %2424, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2426 = torch.aten.slice.Tensor %2424, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2427 = torch.aten.slice.Tensor %2424, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2428 = torch.aten.transpose.int %2425, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2429 = torch.aten.view %2428, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2430 = torch.aten.permute %2426, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %2431 = torch.aten.view %2430, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %2432 = torch.aten.bmm %2429, %2431 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %2433 = torch.aten.mul.Scalar %2432, %float6.578940e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %2434 = torch.aten.mul.Scalar %346, %float5.263160e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %2435 = torch.aten.add.Tensor %2433, %2434, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %2436 = torch.aten.view %2435, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2437 = torch.aten.mul.Scalar %2436, %int19 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2438 = torch.aten.add.Tensor %2437, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
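| // Clamp the masked scores from below with the 0-d tensor %287 (presumably a dtype-min floor), then a | |
| // numerically stable softmax: subtract the rowwise max, exponentiate, divide by the rowwise sum. | |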
| %2439 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %2440 = torch.aten.maximum %2438, %2439 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_38, %indices_39 = torch.aten.max.dim %2440, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %2441 = torch.aten.sub.Tensor %2440, %values_38, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %2442 = torch.aten.exp %2441 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2443 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2444 = torch.aten.sum.dim_IntList %2442, %2443, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %2445 = torch.aten.div.Tensor %2442, %2444 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
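| // Re-apply the mask multiplicatively: materialize %372 as an i1 tensor via empty + copy, invert it | |
| // with bitwise_not, and zero out the masked attention probabilities. | |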
| %2446 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2447 = torch.aten.empty.memory_format %2446, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %2448 = torch.valsem.aten.copy %2447, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %2449 = torch.aten.bitwise_not %2448 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %2450 = torch.aten.mul.Tensor %2445, %2449 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2451 = torch.aten.view %2450, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
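| // Context: attention probabilities @ V, heads transposed back and flattened to [1,128,1024]. | |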
| %2452 = torch.aten.transpose.int %2427, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2453 = torch.aten.view %2452, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2454 = torch.aten.bmm %2451, %2453 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %2455 = torch.aten.view %2454, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %2456 = torch.aten.permute %2455, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %2457 = torch.aten.clone %2456, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2458 = torch.aten.view %2457, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
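| // Output projection (1024x1024 matmul + bias), then the residual add back onto the block input %2399. | |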
| %2459 = torch.aten.transpose.int %58, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %2460 = torch.aten.view %2458, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2461 = torch.aten.mm %2460, %2459 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2462 = torch.aten.mul.Scalar %57, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2463 = torch.aten.add.Tensor %2462, %2461, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2464 = torch.aten.view %2463, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2465 = torch.aten.add.Tensor %2399, %2464, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
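| // Post-attention LayerNorm, decomposed: mean = sum(x, dim=2)/1024; var = sum((x-mean)^2, dim=2)/1024; | |
| // y = (x - mean) * rsqrt(var + 1e-05) * weight + bias. | |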
| %2466 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2467 = torch.aten.sum.dim_IntList %2465, %2466, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2468 = torch.aten.div.Scalar %2467, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2469 = torch.aten.size.int %2465, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2470 = torch.prim.ListConstruct %2469, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2471 = torch.aten.broadcast_to %2468, %2470 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2472 = torch.aten.sub.Tensor %2465, %2471, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2473 = torch.aten.mul.Tensor %2472, %2472 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2474 = torch.aten.sum.dim_IntList %2473, %2466, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2475 = torch.aten.div.Scalar %2474, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2476 = torch.aten.add.Scalar %2475, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2477 = torch.aten.rsqrt %2476 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2478 = torch.aten.size.int %2465, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2479 = torch.prim.ListConstruct %2478, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2480 = torch.aten.broadcast_to %2477, %2479 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2481 = torch.aten.mul.Tensor %2472, %2480 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2482 = torch.aten.mul.Tensor %2481, %56 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2483 = torch.aten.add.Tensor %2482, %55, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
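| // MLP: 1024 -> 4096 -> 1024 with the tanh GELU approximation | |
| // gelu(x) ~= 0.5*x*(1 + tanh(0.797885*x*(1 + 0.044715*x^2))), where 0.797885 ~= sqrt(2/pi). | |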
| %2484 = torch.aten.transpose.int %54, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %2485 = torch.aten.view %2483, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2486 = torch.aten.mm %2485, %2484 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %2487 = torch.aten.mul.Scalar %53, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %2488 = torch.aten.add.Tensor %2487, %2486, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %2489 = torch.aten.view %2488, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %2490 = torch.aten.mul.Scalar %2489, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2491 = torch.aten.mul.Scalar %2489, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2492 = torch.aten.mul.Scalar %2489, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2493 = torch.aten.mul.Tensor %2492, %2489 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2494 = torch.aten.add.Scalar %2493, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2495 = torch.aten.mul.Tensor %2491, %2494 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2496 = torch.aten.tanh %2495 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2497 = torch.aten.add.Scalar %2496, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2498 = torch.aten.mul.Tensor %2490, %2497 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2499 = torch.aten.transpose.int %52, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %2500 = torch.aten.view %2498, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %2501 = torch.aten.mm %2500, %2499 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2502 = torch.aten.mul.Scalar %51, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2503 = torch.aten.add.Tensor %2502, %2501, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2504 = torch.aten.view %2503, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2505 = torch.aten.add.Tensor %2465, %2504, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
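| // End of this decoder block. The next block repeats the same pattern with layer-number scale 20: | |
| // 6.250000e-03 = 1/(20*sqrt(64)) and 5.000000e-02 = 1/20. | |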
| %2506 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2507 = torch.aten.sum.dim_IntList %2505, %2506, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2508 = torch.aten.div.Scalar %2507, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2509 = torch.aten.size.int %2505, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2510 = torch.prim.ListConstruct %2509, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2511 = torch.aten.broadcast_to %2508, %2510 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2512 = torch.aten.sub.Tensor %2505, %2511, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2513 = torch.aten.mul.Tensor %2512, %2512 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2514 = torch.aten.sum.dim_IntList %2513, %2506, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2515 = torch.aten.div.Scalar %2514, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2516 = torch.aten.add.Scalar %2515, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2517 = torch.aten.rsqrt %2516 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2518 = torch.aten.size.int %2505, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2519 = torch.prim.ListConstruct %2518, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2520 = torch.aten.broadcast_to %2517, %2519 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2521 = torch.aten.mul.Tensor %2512, %2520 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2522 = torch.aten.mul.Tensor %2521, %50 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2523 = torch.aten.add.Tensor %2522, %49, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2524 = torch.aten.transpose.int %48, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %2525 = torch.aten.view %2523, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2526 = torch.aten.mm %2525, %2524 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %2527 = torch.aten.mul.Scalar %47, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %2528 = torch.aten.add.Tensor %2527, %2526, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %2529 = torch.aten.view %2528, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %2530 = torch.aten.view %2529, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %2531 = torch.aten.slice.Tensor %2530, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2532 = torch.aten.slice.Tensor %2530, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2533 = torch.aten.slice.Tensor %2530, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2534 = torch.aten.transpose.int %2531, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2535 = torch.aten.view %2534, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2536 = torch.aten.permute %2532, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %2537 = torch.aten.view %2536, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %2538 = torch.aten.bmm %2535, %2537 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %2539 = torch.aten.mul.Scalar %2538, %float6.250000e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %2540 = torch.aten.mul.Scalar %346, %float5.000000e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %2541 = torch.aten.add.Tensor %2539, %2540, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %2542 = torch.aten.view %2541, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2543 = torch.aten.mul.Scalar %2542, %int20 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2544 = torch.aten.add.Tensor %2543, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2545 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %2546 = torch.aten.maximum %2544, %2545 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_40, %indices_41 = torch.aten.max.dim %2546, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %2547 = torch.aten.sub.Tensor %2546, %values_40, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %2548 = torch.aten.exp %2547 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2549 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2550 = torch.aten.sum.dim_IntList %2548, %2549, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %2551 = torch.aten.div.Tensor %2548, %2550 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2552 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2553 = torch.aten.empty.memory_format %2552, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %2554 = torch.valsem.aten.copy %2553, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %2555 = torch.aten.bitwise_not %2554 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %2556 = torch.aten.mul.Tensor %2551, %2555 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2557 = torch.aten.view %2556, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %2558 = torch.aten.transpose.int %2533, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2559 = torch.aten.view %2558, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2560 = torch.aten.bmm %2557, %2559 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %2561 = torch.aten.view %2560, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %2562 = torch.aten.permute %2561, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %2563 = torch.aten.clone %2562, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2564 = torch.aten.view %2563, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2565 = torch.aten.transpose.int %46, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %2566 = torch.aten.view %2564, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2567 = torch.aten.mm %2566, %2565 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2568 = torch.aten.mul.Scalar %45, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2569 = torch.aten.add.Tensor %2568, %2567, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2570 = torch.aten.view %2569, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2571 = torch.aten.add.Tensor %2505, %2570, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2572 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2573 = torch.aten.sum.dim_IntList %2571, %2572, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2574 = torch.aten.div.Scalar %2573, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2575 = torch.aten.size.int %2571, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2576 = torch.prim.ListConstruct %2575, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2577 = torch.aten.broadcast_to %2574, %2576 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2578 = torch.aten.sub.Tensor %2571, %2577, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2579 = torch.aten.mul.Tensor %2578, %2578 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2580 = torch.aten.sum.dim_IntList %2579, %2572, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2581 = torch.aten.div.Scalar %2580, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2582 = torch.aten.add.Scalar %2581, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2583 = torch.aten.rsqrt %2582 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2584 = torch.aten.size.int %2571, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2585 = torch.prim.ListConstruct %2584, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2586 = torch.aten.broadcast_to %2583, %2585 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2587 = torch.aten.mul.Tensor %2578, %2586 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2588 = torch.aten.mul.Tensor %2587, %44 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2589 = torch.aten.add.Tensor %2588, %43, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2590 = torch.aten.transpose.int %42, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %2591 = torch.aten.view %2589, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2592 = torch.aten.mm %2591, %2590 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %2593 = torch.aten.mul.Scalar %41, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %2594 = torch.aten.add.Tensor %2593, %2592, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %2595 = torch.aten.view %2594, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %2596 = torch.aten.mul.Scalar %2595, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2597 = torch.aten.mul.Scalar %2595, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2598 = torch.aten.mul.Scalar %2595, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2599 = torch.aten.mul.Tensor %2598, %2595 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2600 = torch.aten.add.Scalar %2599, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2601 = torch.aten.mul.Tensor %2597, %2600 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2602 = torch.aten.tanh %2601 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2603 = torch.aten.add.Scalar %2602, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2604 = torch.aten.mul.Tensor %2596, %2603 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2605 = torch.aten.transpose.int %40, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %2606 = torch.aten.view %2604, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %2607 = torch.aten.mm %2606, %2605 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2608 = torch.aten.mul.Scalar %39, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2609 = torch.aten.add.Tensor %2608, %2607, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2610 = torch.aten.view %2609, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2611 = torch.aten.add.Tensor %2571, %2610, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
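| // Next decoder block, layer-number scale 21: 5.952380e-03 ~= 1/(21*sqrt(64)), 4.761900e-02 ~= 1/21. | |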
| %2612 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2613 = torch.aten.sum.dim_IntList %2611, %2612, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2614 = torch.aten.div.Scalar %2613, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2615 = torch.aten.size.int %2611, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2616 = torch.prim.ListConstruct %2615, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2617 = torch.aten.broadcast_to %2614, %2616 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2618 = torch.aten.sub.Tensor %2611, %2617, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2619 = torch.aten.mul.Tensor %2618, %2618 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2620 = torch.aten.sum.dim_IntList %2619, %2612, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2621 = torch.aten.div.Scalar %2620, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2622 = torch.aten.add.Scalar %2621, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2623 = torch.aten.rsqrt %2622 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2624 = torch.aten.size.int %2611, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2625 = torch.prim.ListConstruct %2624, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2626 = torch.aten.broadcast_to %2623, %2625 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2627 = torch.aten.mul.Tensor %2618, %2626 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2628 = torch.aten.mul.Tensor %2627, %38 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2629 = torch.aten.add.Tensor %2628, %37, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2630 = torch.aten.transpose.int %36, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %2631 = torch.aten.view %2629, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2632 = torch.aten.mm %2631, %2630 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %2633 = torch.aten.mul.Scalar %35, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %2634 = torch.aten.add.Tensor %2633, %2632, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %2635 = torch.aten.view %2634, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %2636 = torch.aten.view %2635, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %2637 = torch.aten.slice.Tensor %2636, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2638 = torch.aten.slice.Tensor %2636, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2639 = torch.aten.slice.Tensor %2636, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2640 = torch.aten.transpose.int %2637, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2641 = torch.aten.view %2640, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2642 = torch.aten.permute %2638, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %2643 = torch.aten.view %2642, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %2644 = torch.aten.bmm %2641, %2643 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %2645 = torch.aten.mul.Scalar %2644, %float5.952380e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %2646 = torch.aten.mul.Scalar %346, %float4.761900e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %2647 = torch.aten.add.Tensor %2645, %2646, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %2648 = torch.aten.view %2647, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2649 = torch.aten.mul.Scalar %2648, %int21 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2650 = torch.aten.add.Tensor %2649, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2651 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %2652 = torch.aten.maximum %2650, %2651 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_42, %indices_43 = torch.aten.max.dim %2652, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %2653 = torch.aten.sub.Tensor %2652, %values_42, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %2654 = torch.aten.exp %2653 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2655 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2656 = torch.aten.sum.dim_IntList %2654, %2655, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %2657 = torch.aten.div.Tensor %2654, %2656 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2658 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2659 = torch.aten.empty.memory_format %2658, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %2660 = torch.valsem.aten.copy %2659, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %2661 = torch.aten.bitwise_not %2660 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %2662 = torch.aten.mul.Tensor %2657, %2661 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2663 = torch.aten.view %2662, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %2664 = torch.aten.transpose.int %2639, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2665 = torch.aten.view %2664, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2666 = torch.aten.bmm %2663, %2665 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %2667 = torch.aten.view %2666, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %2668 = torch.aten.permute %2667, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %2669 = torch.aten.clone %2668, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2670 = torch.aten.view %2669, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2671 = torch.aten.transpose.int %34, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %2672 = torch.aten.view %2670, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2673 = torch.aten.mm %2672, %2671 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2674 = torch.aten.mul.Scalar %33, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2675 = torch.aten.add.Tensor %2674, %2673, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2676 = torch.aten.view %2675, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2677 = torch.aten.add.Tensor %2611, %2676, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2678 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2679 = torch.aten.sum.dim_IntList %2677, %2678, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2680 = torch.aten.div.Scalar %2679, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2681 = torch.aten.size.int %2677, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2682 = torch.prim.ListConstruct %2681, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2683 = torch.aten.broadcast_to %2680, %2682 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2684 = torch.aten.sub.Tensor %2677, %2683, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2685 = torch.aten.mul.Tensor %2684, %2684 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2686 = torch.aten.sum.dim_IntList %2685, %2678, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2687 = torch.aten.div.Scalar %2686, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2688 = torch.aten.add.Scalar %2687, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2689 = torch.aten.rsqrt %2688 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2690 = torch.aten.size.int %2677, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2691 = torch.prim.ListConstruct %2690, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2692 = torch.aten.broadcast_to %2689, %2691 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2693 = torch.aten.mul.Tensor %2684, %2692 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2694 = torch.aten.mul.Tensor %2693, %32 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2695 = torch.aten.add.Tensor %2694, %31, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2696 = torch.aten.transpose.int %30, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %2697 = torch.aten.view %2695, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2698 = torch.aten.mm %2697, %2696 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %2699 = torch.aten.mul.Scalar %29, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %2700 = torch.aten.add.Tensor %2699, %2698, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %2701 = torch.aten.view %2700, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %2702 = torch.aten.mul.Scalar %2701, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2703 = torch.aten.mul.Scalar %2701, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2704 = torch.aten.mul.Scalar %2701, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2705 = torch.aten.mul.Tensor %2704, %2701 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2706 = torch.aten.add.Scalar %2705, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2707 = torch.aten.mul.Tensor %2703, %2706 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2708 = torch.aten.tanh %2707 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2709 = torch.aten.add.Scalar %2708, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2710 = torch.aten.mul.Tensor %2702, %2709 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2711 = torch.aten.transpose.int %28, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %2712 = torch.aten.view %2710, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %2713 = torch.aten.mm %2712, %2711 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2714 = torch.aten.mul.Scalar %27, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2715 = torch.aten.add.Tensor %2714, %2713, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2716 = torch.aten.view %2715, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2717 = torch.aten.add.Tensor %2677, %2716, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
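| // Next decoder block, layer-number scale 22: 5.681820e-03 ~= 1/(22*sqrt(64)), 4.545450e-02 ~= 1/22. | |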
| %2718 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2719 = torch.aten.sum.dim_IntList %2717, %2718, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2720 = torch.aten.div.Scalar %2719, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2721 = torch.aten.size.int %2717, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2722 = torch.prim.ListConstruct %2721, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2723 = torch.aten.broadcast_to %2720, %2722 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2724 = torch.aten.sub.Tensor %2717, %2723, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2725 = torch.aten.mul.Tensor %2724, %2724 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2726 = torch.aten.sum.dim_IntList %2725, %2718, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2727 = torch.aten.div.Scalar %2726, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2728 = torch.aten.add.Scalar %2727, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2729 = torch.aten.rsqrt %2728 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2730 = torch.aten.size.int %2717, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2731 = torch.prim.ListConstruct %2730, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2732 = torch.aten.broadcast_to %2729, %2731 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2733 = torch.aten.mul.Tensor %2724, %2732 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2734 = torch.aten.mul.Tensor %2733, %26 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2735 = torch.aten.add.Tensor %2734, %25, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2736 = torch.aten.transpose.int %24, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %2737 = torch.aten.view %2735, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2738 = torch.aten.mm %2737, %2736 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %2739 = torch.aten.mul.Scalar %23, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %2740 = torch.aten.add.Tensor %2739, %2738, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %2741 = torch.aten.view %2740, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %2742 = torch.aten.view %2741, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %2743 = torch.aten.slice.Tensor %2742, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2744 = torch.aten.slice.Tensor %2742, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2745 = torch.aten.slice.Tensor %2742, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2746 = torch.aten.transpose.int %2743, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2747 = torch.aten.view %2746, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2748 = torch.aten.permute %2744, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %2749 = torch.aten.view %2748, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %2750 = torch.aten.bmm %2747, %2749 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
| %2751 = torch.aten.mul.Scalar %2750, %float5.681820e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %2752 = torch.aten.mul.Scalar %346, %float4.545450e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %2753 = torch.aten.add.Tensor %2751, %2752, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %2754 = torch.aten.view %2753, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2755 = torch.aten.mul.Scalar %2754, %int22 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2756 = torch.aten.add.Tensor %2755, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2757 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %2758 = torch.aten.maximum %2756, %2757 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_44, %indices_45 = torch.aten.max.dim %2758, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %2759 = torch.aten.sub.Tensor %2758, %values_44, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %2760 = torch.aten.exp %2759 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2761 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2762 = torch.aten.sum.dim_IntList %2760, %2761, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %2763 = torch.aten.div.Tensor %2760, %2762 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2764 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2765 = torch.aten.empty.memory_format %2764, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %2766 = torch.valsem.aten.copy %2765, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %2767 = torch.aten.bitwise_not %2766 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %2768 = torch.aten.mul.Tensor %2763, %2767 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2769 = torch.aten.view %2768, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %2770 = torch.aten.transpose.int %2745, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2771 = torch.aten.view %2770, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2772 = torch.aten.bmm %2769, %2771 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %2773 = torch.aten.view %2772, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %2774 = torch.aten.permute %2773, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %2775 = torch.aten.clone %2774, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2776 = torch.aten.view %2775, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2777 = torch.aten.transpose.int %22, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %2778 = torch.aten.view %2776, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2779 = torch.aten.mm %2778, %2777 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2780 = torch.aten.mul.Scalar %21, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2781 = torch.aten.add.Tensor %2780, %2779, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2782 = torch.aten.view %2781, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2783 = torch.aten.add.Tensor %2717, %2782, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2784 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2785 = torch.aten.sum.dim_IntList %2783, %2784, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2786 = torch.aten.div.Scalar %2785, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2787 = torch.aten.size.int %2783, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2788 = torch.prim.ListConstruct %2787, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2789 = torch.aten.broadcast_to %2786, %2788 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2790 = torch.aten.sub.Tensor %2783, %2789, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2791 = torch.aten.mul.Tensor %2790, %2790 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2792 = torch.aten.sum.dim_IntList %2791, %2784, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2793 = torch.aten.div.Scalar %2792, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2794 = torch.aten.add.Scalar %2793, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2795 = torch.aten.rsqrt %2794 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2796 = torch.aten.size.int %2783, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2797 = torch.prim.ListConstruct %2796, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2798 = torch.aten.broadcast_to %2795, %2797 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2799 = torch.aten.mul.Tensor %2790, %2798 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2800 = torch.aten.mul.Tensor %2799, %20 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2801 = torch.aten.add.Tensor %2800, %19, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2802 = torch.aten.transpose.int %18, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %2803 = torch.aten.view %2801, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2804 = torch.aten.mm %2803, %2802 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %2805 = torch.aten.mul.Scalar %17, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %2806 = torch.aten.add.Tensor %2805, %2804, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %2807 = torch.aten.view %2806, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %2808 = torch.aten.mul.Scalar %2807, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2809 = torch.aten.mul.Scalar %2807, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2810 = torch.aten.mul.Scalar %2807, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2811 = torch.aten.mul.Tensor %2810, %2807 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2812 = torch.aten.add.Scalar %2811, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2813 = torch.aten.mul.Tensor %2809, %2812 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2814 = torch.aten.tanh %2813 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2815 = torch.aten.add.Scalar %2814, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2816 = torch.aten.mul.Tensor %2808, %2815 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2817 = torch.aten.transpose.int %16, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %2818 = torch.aten.view %2816, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %2819 = torch.aten.mm %2818, %2817 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2820 = torch.aten.mul.Scalar %15, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2821 = torch.aten.add.Tensor %2820, %2819, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2822 = torch.aten.view %2821, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2823 = torch.aten.add.Tensor %2783, %2822, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
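| // Final residual of this block; the next block's input LayerNorm and fused QKV projection follow. | |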
| %2824 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2825 = torch.aten.sum.dim_IntList %2823, %2824, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2826 = torch.aten.div.Scalar %2825, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2827 = torch.aten.size.int %2823, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2828 = torch.prim.ListConstruct %2827, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2829 = torch.aten.broadcast_to %2826, %2828 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2830 = torch.aten.sub.Tensor %2823, %2829, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2831 = torch.aten.mul.Tensor %2830, %2830 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2832 = torch.aten.sum.dim_IntList %2831, %2824, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2833 = torch.aten.div.Scalar %2832, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2834 = torch.aten.add.Scalar %2833, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2835 = torch.aten.rsqrt %2834 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2836 = torch.aten.size.int %2823, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2837 = torch.prim.ListConstruct %2836, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2838 = torch.aten.broadcast_to %2835, %2837 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2839 = torch.aten.mul.Tensor %2830, %2838 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2840 = torch.aten.mul.Tensor %2839, %14 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2841 = torch.aten.add.Tensor %2840, %13, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
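Note: %2824-%2841 are LayerNorm over the last dimension written out in primitives: mean = sum/1024, variance = mean of squared deviations, then (x - mean) * rsqrt(var + eps) * weight + bias, with eps = 1e-5 as in the IR. A sketch, assuming normalized_shape == (1024,):

    import torch

    def layer_norm_decomposed(x, weight, bias, eps=1e-5):
        mean = x.sum(dim=2, keepdim=True) / 1024                      # %2825/%2826
        centered = x - mean.expand_as(x)                              # %2829/%2830
        var = (centered * centered).sum(dim=2, keepdim=True) / 1024   # %2831-%2833
        inv_std = torch.rsqrt(var + eps)                              # %2834/%2835
        return centered * inv_std.expand_as(x) * weight + bias        # %2838-%2841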
| %2842 = torch.aten.transpose.int %12, %int0, %int1 : !torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,3072],f32> | |
| %2843 = torch.aten.view %2841, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2844 = torch.aten.mm %2843, %2842 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32> -> !torch.vtensor<[128,3072],f32> | |
| %2845 = torch.aten.mul.Scalar %11, %int1 : !torch.vtensor<[3072],f32>, !torch.int -> !torch.vtensor<[3072],f32> | |
| %2846 = torch.aten.add.Tensor %2845, %2844, %int1 : !torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int -> !torch.vtensor<[128,3072],f32> | |
| %2847 = torch.aten.view %2846, %397 : !torch.vtensor<[128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,3072],f32> | |
| %2848 = torch.aten.view %2847, %399 : !torch.vtensor<[1,128,3072],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,192],f32> | |
| %2849 = torch.aten.slice.Tensor %2848, %int-1, %int0, %int64, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2850 = torch.aten.slice.Tensor %2848, %int-1, %int64, %int128, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2851 = torch.aten.slice.Tensor %2848, %int-1, %int128, %int192, %int1 : !torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2852 = torch.aten.transpose.int %2849, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2853 = torch.aten.view %2852, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2854 = torch.aten.permute %2850, %407 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,64,128],f32> | |
| %2855 = torch.aten.view %2854, %409 : !torch.vtensor<[1,16,64,128],f32>, !torch.list<int> -> !torch.vtensor<[16,64,128],f32> | |
| %2856 = torch.aten.bmm %2853, %2855 : !torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32> -> !torch.vtensor<[16,128,128],f32> | |
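Note: %2842-%2856 are BLOOM-style fused attention input: one 1024 to 3072 projection, a reshape to [1, 128, 16, 3*64], slicing the last dim into per-head Q/K/V of size 64, then a batched matmul of Q against K transposed. A sketch with 16 heads of dim 64 (hypothetical helper, shapes from the log):

    import torch

    def qkv_scores(hidden, w_qkv, b_qkv):
        # hidden: [1,128,1024]; w_qkv: [3072,1024]; b_qkv: [3072]
        fused = hidden.reshape(-1, 1024).mm(w_qkv.t()) + b_qkv            # %2843-%2846
        fused = fused.view(1, 128, 16, 192)                               # %2847/%2848
        q, k, v = fused[..., :64], fused[..., 64:128], fused[..., 128:]   # %2849-%2851
        q = q.transpose(1, 2).reshape(16, 128, 64)                        # %2852/%2853
        k_t = k.permute(0, 2, 3, 1).reshape(16, 64, 128)                  # %2854/%2855
        return torch.bmm(q, k_t), v                                       # %2856: [16,128,128]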
| %2857 = torch.aten.mul.Scalar %2856, %float5.434780e-03 : !torch.vtensor<[16,128,128],f32>, !torch.float -> !torch.vtensor<[16,128,128],f32> | |
| %2858 = torch.aten.mul.Scalar %346, %float4.347830e-02 : !torch.vtensor<[16,1,128],f32>, !torch.float -> !torch.vtensor<[16,1,128],f32> | |
| %2859 = torch.aten.add.Tensor %2857, %2858, %int1 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int -> !torch.vtensor<[16,128,128],f32> | |
| %2860 = torch.aten.view %2859, %415 : !torch.vtensor<[16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2861 = torch.aten.mul.Scalar %2860, %int23 : !torch.vtensor<[1,16,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
| %2862 = torch.aten.add.Tensor %2861, %372, %int1 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int -> !torch.vtensor<[1,16,128,128],f32> | |
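Note: the two scalar factors above are consistent with BLOOM's numerical-stability trick of folding the layer index into the score scaling: 5.434780e-03 is 1/(sqrt(64)*23) and 4.347830e-02 is 1/23, and %2861 multiplies by 23 again after adding the pre-divided alibi bias (%346), restoring the usual 1/sqrt(head_dim) scale; this would put us in layer 23 of the stack. A quick arithmetic check:

    import math
    inv_norm  = 1.0 / (math.sqrt(64) * 23)  # 0.00543478..., printed as 5.434780e-03
    inv_layer = 1.0 / 23                     # 0.04347826..., printed as 4.347830e-02
    # (scores*inv_norm + alibi*inv_layer) * 23  ==  scores/sqrt(64) + alibi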
| %2863 = torch.aten.clone %287, %none : !torch.vtensor<[],f32>, !torch.none -> !torch.vtensor<[],f32> | |
| %2864 = torch.aten.maximum %2862, %2863 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %values_46, %indices_47 = torch.aten.max.dim %2864, %int-1, %true : !torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool -> !torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64> | |
| %2865 = torch.aten.sub.Tensor %2864, %values_46, %float1.000000e00 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float -> !torch.vtensor<[1,16,128,128],f32> | |
| %2866 = torch.aten.exp %2865 : !torch.vtensor<[1,16,128,128],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
| %2867 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2868 = torch.aten.sum.dim_IntList %2866, %2867, %true, %none : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[1,16,128,1],f32> | |
| %2869 = torch.aten.div.Tensor %2866, %2868 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32> -> !torch.vtensor<[1,16,128,128],f32> | |
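Note: %2863-%2869 are a numerically stable softmax along the last dim: clamp the masked scores from below with the dtype-minimum constant (%287), subtract the row max, exponentiate, normalize. A sketch:

    import torch

    def stable_softmax(scores, min_value):
        # min_value: 0-d tensor holding the dtype minimum (%2863)
        scores = torch.maximum(scores, min_value)            # %2864
        row_max = scores.max(dim=-1, keepdim=True).values    # %values_46
        exp = torch.exp(scores - row_max)                    # %2865/%2866
        return exp / exp.sum(dim=-1, keepdim=True)           # %2867-%2869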
| %2870 = torch.prim.ListConstruct %int1, %int1, %int128, %int128 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2871 = torch.aten.empty.memory_format %2870, %int11, %none, %none, %none, %none : !torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none -> !torch.vtensor<[1,1,128,128],i1> | |
| %2872 = torch.valsem.aten.copy %2871, %372, %false : !torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool -> !torch.vtensor<[1,1,128,128],i1> | |
| %2873 = torch.aten.bitwise_not %2872 : !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,1,128,128],i1> | |
| %2874 = torch.aten.mul.Tensor %2869, %2873 : !torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1> -> !torch.vtensor<[1,16,128,128],f32> | |
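Note: %2870-%2874 appear to re-apply the combined mask after the softmax: the float mask %372 is copied into a freshly allocated bool tensor, inverted with bitwise_not, and multiplied into the probabilities, zeroing the masked positions. Sketch (hypothetical helper name):

    import torch

    def zero_masked(probs, mask):
        # mask: float tensor, nonzero where attention is blocked (%372)
        return probs * (~mask.to(torch.bool))    # %2871-%2874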
| %2875 = torch.aten.view %2874, %431 : !torch.vtensor<[1,16,128,128],f32>, !torch.list<int> -> !torch.vtensor<[16,128,128],f32> | |
| %2876 = torch.aten.transpose.int %2851, %int1, %int2 : !torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,16,128,64],f32> | |
| %2877 = torch.aten.view %2876, %405 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[16,128,64],f32> | |
| %2878 = torch.aten.bmm %2875, %2877 : !torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32> -> !torch.vtensor<[16,128,64],f32> | |
| %2879 = torch.aten.view %2878, %436 : !torch.vtensor<[16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,16,128,64],f32> | |
| %2880 = torch.aten.permute %2879, %438 : !torch.vtensor<[1,16,128,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,16,64],f32> | |
| %2881 = torch.aten.clone %2880, %int0 : !torch.vtensor<[1,128,16,64],f32>, !torch.int -> !torch.vtensor<[1,128,16,64],f32> | |
| %2882 = torch.aten.view %2881, %441 : !torch.vtensor<[1,128,16,64],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
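Note: %2875-%2882 finish the attention: batched matmul of the probabilities against V per head, then the heads are permuted back next to the sequence dim and 16*64 is flattened to 1024. Sketch:

    import torch

    def attn_output(probs, v):
        # probs: [1,16,128,128] from the softmax; v: [1,128,16,64] slice
        v = v.transpose(1, 2).reshape(16, 128, 64)             # %2876/%2877
        ctx = torch.bmm(probs.view(16, 128, 128), v)           # %2875/%2878
        ctx = ctx.view(1, 16, 128, 64).permute(0, 2, 1, 3)     # %2879/%2880
        return ctx.contiguous().view(1, 128, 1024)             # %2881/%2882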
| %2883 = torch.aten.transpose.int %10, %int0, %int1 : !torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,1024],f32> | |
| %2884 = torch.aten.view %2882, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2885 = torch.aten.mm %2884, %2883 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2886 = torch.aten.mul.Scalar %9, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2887 = torch.aten.add.Tensor %2886, %2885, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2888 = torch.aten.view %2887, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2889 = torch.aten.add.Tensor %2823, %2888, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2890 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2891 = torch.aten.sum.dim_IntList %2889, %2890, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2892 = torch.aten.div.Scalar %2891, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2893 = torch.aten.size.int %2889, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2894 = torch.prim.ListConstruct %2893, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2895 = torch.aten.broadcast_to %2892, %2894 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2896 = torch.aten.sub.Tensor %2889, %2895, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2897 = torch.aten.mul.Tensor %2896, %2896 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2898 = torch.aten.sum.dim_IntList %2897, %2890, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2899 = torch.aten.div.Scalar %2898, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2900 = torch.aten.add.Scalar %2899, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2901 = torch.aten.rsqrt %2900 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2902 = torch.aten.size.int %2889, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2903 = torch.prim.ListConstruct %2902, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2904 = torch.aten.broadcast_to %2901, %2903 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2905 = torch.aten.mul.Tensor %2896, %2904 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2906 = torch.aten.mul.Tensor %2905, %8 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2907 = torch.aten.add.Tensor %2906, %7, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2908 = torch.aten.transpose.int %6, %int0, %int1 : !torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,4096],f32> | |
| %2909 = torch.aten.view %2907, %392 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2910 = torch.aten.mm %2909, %2908 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32> -> !torch.vtensor<[128,4096],f32> | |
| %2911 = torch.aten.mul.Scalar %5, %int1 : !torch.vtensor<[4096],f32>, !torch.int -> !torch.vtensor<[4096],f32> | |
| %2912 = torch.aten.add.Tensor %2911, %2910, %int1 : !torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int -> !torch.vtensor<[128,4096],f32> | |
| %2913 = torch.aten.view %2912, %473 : !torch.vtensor<[128,4096],f32>, !torch.list<int> -> !torch.vtensor<[1,128,4096],f32> | |
| %2914 = torch.aten.mul.Scalar %2913, %float5.000000e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2915 = torch.aten.mul.Scalar %2913, %float7.978850e-01 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2916 = torch.aten.mul.Scalar %2913, %float4.471500e-02 : !torch.vtensor<[1,128,4096],f32>, !torch.float -> !torch.vtensor<[1,128,4096],f32> | |
| %2917 = torch.aten.mul.Tensor %2916, %2913 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2918 = torch.aten.add.Scalar %2917, %int1, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2919 = torch.aten.mul.Tensor %2915, %2918 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2920 = torch.aten.tanh %2919 : !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2921 = torch.aten.add.Scalar %2920, %float1.000000e00, %int1 : !torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int -> !torch.vtensor<[1,128,4096],f32> | |
| %2922 = torch.aten.mul.Tensor %2914, %2921 : !torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32> -> !torch.vtensor<[1,128,4096],f32> | |
| %2923 = torch.aten.transpose.int %4, %int0, %int1 : !torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int -> !torch.vtensor<[4096,1024],f32> | |
| %2924 = torch.aten.view %2922, %485 : !torch.vtensor<[1,128,4096],f32>, !torch.list<int> -> !torch.vtensor<[128,4096],f32> | |
| %2925 = torch.aten.mm %2924, %2923 : !torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32> -> !torch.vtensor<[128,1024],f32> | |
| %2926 = torch.aten.mul.Scalar %3, %int1 : !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[1024],f32> | |
| %2927 = torch.aten.add.Tensor %2926, %2925, %int1 : !torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int -> !torch.vtensor<[128,1024],f32> | |
| %2928 = torch.aten.view %2927, %441 : !torch.vtensor<[128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2929 = torch.aten.add.Tensor %2889, %2928, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2930 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int> | |
| %2931 = torch.aten.sum.dim_IntList %2929, %2930, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2932 = torch.aten.div.Scalar %2931, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2933 = torch.aten.size.int %2929, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2934 = torch.prim.ListConstruct %2933, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2935 = torch.aten.broadcast_to %2932, %2934 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2936 = torch.aten.sub.Tensor %2929, %2935, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2937 = torch.aten.mul.Tensor %2936, %2936 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2938 = torch.aten.sum.dim_IntList %2937, %2930, %true, %none : !torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?,128,1],f32> | |
| %2939 = torch.aten.div.Scalar %2938, %int1024 : !torch.vtensor<[?,128,1],f32>, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2940 = torch.aten.add.Scalar %2939, %float1.000000e-05, %int1 : !torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[?,128,1],f32> | |
| %2941 = torch.aten.rsqrt %2940 : !torch.vtensor<[?,128,1],f32> -> !torch.vtensor<[?,128,1],f32> | |
| %2942 = torch.aten.size.int %2929, %int0 : !torch.vtensor<[?,128,1024],f32>, !torch.int -> !torch.int | |
| %2943 = torch.prim.ListConstruct %2942, %int128, %int1024 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2944 = torch.aten.broadcast_to %2941, %2943 : !torch.vtensor<[?,128,1],f32>, !torch.list<int> -> !torch.vtensor<[?,128,1024],f32> | |
| %2945 = torch.aten.mul.Tensor %2936, %2944 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2946 = torch.aten.mul.Tensor %2945, %2 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32> -> !torch.vtensor<[?,128,1024],f32> | |
| %2947 = torch.aten.add.Tensor %2946, %1, %int1 : !torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int -> !torch.vtensor<[?,128,1024],f32> | |
| %2948 = torch.aten.view %2947, %441 : !torch.vtensor<[?,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[1,128,1024],f32> | |
| %2949 = torch.aten.transpose.int %0, %int0, %int1 : !torch.vtensor<[2,1024],f32>, !torch.int, !torch.int -> !torch.vtensor<[1024,2],f32> | |
| %2950 = torch.aten.view %2948, %392 : !torch.vtensor<[1,128,1024],f32>, !torch.list<int> -> !torch.vtensor<[128,1024],f32> | |
| %2951 = torch.aten.mm %2950, %2949 : !torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,2],f32> -> !torch.vtensor<[128,2],f32> | |
| %2952 = torch.prim.ListConstruct %int1, %int128, %int2 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| %2953 = torch.aten.view %2951, %2952 : !torch.vtensor<[128,2],f32>, !torch.list<int> -> !torch.vtensor<[1,128,2],f32> | |
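Note: after the final LayerNorm, %2949-%2953 project the hidden states with a [2,1024] weight (%0) to two logits per token, which looks like a sequence-classification head with two labels. Sketch:

    import torch

    def score_head(hidden, score_weight):
        # hidden: [1,128,1024]; score_weight: [2,1024] (%0)
        logits = hidden.reshape(-1, 1024).mm(score_weight.t())   # %2950/%2951
        return logits.view(1, 128, 2)                            # %2952/%2953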
| %2954 = torch.aten.ne.Scalar %arg0, %int3 : !torch.vtensor<[?,?],si64>, !torch.int -> !torch.vtensor<[?,?],i1> | |
| %2955 = torch.prim.ListConstruct %int-1 : (!torch.int) -> !torch.list<int> | |
| %2956 = torch.aten.sum.dim_IntList %2954, %2955, %false, %none : !torch.vtensor<[?,?],i1>, !torch.list<int>, !torch.bool, !torch.none -> !torch.vtensor<[?],i1> | |
| %2957 = torch.aten.sub.Scalar %2956, %int1, %int1 : !torch.vtensor<[?],i1>, !torch.int, !torch.int -> !torch.vtensor<[?],si64> | |
| %2958 = torch.aten.arange.start_step %int0, %int1, %int1, %none, %none, %cpu, %false : !torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[?],si64> | |
| %2959 = torch.prim.ListConstruct %2958, %2957 : (!torch.vtensor<[?],si64>, !torch.vtensor<[?],si64>) -> !torch.list<vtensor> | |
| %2960 = torch.aten.index.Tensor %2953, %2959 : !torch.vtensor<[1,128,2],f32>, !torch.list<vtensor> -> !torch.vtensor<[?,2],f32> | |
| return %2960 : !torch.vtensor<[?,2],f32> | |
| } | |
| } -> SUCCESS | |
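Note: the tail of the function (%2954-%2960) mirrors how HF sequence classifiers pool causal models: count the non-pad tokens per row (the pad id is 3 in this IR), subtract 1 to get the index of the last real token, and gather that token's logits. A sketch, assuming pad_token_id == 3:

    import torch

    def pool_last_token(logits, input_ids, pad_token_id=3):
        # logits: [batch,128,2]; input_ids: [batch,seq] (%arg0)
        lengths = (input_ids != pad_token_id).sum(dim=-1) - 1          # %2954-%2957
        batch = torch.arange(logits.shape[0], device=logits.device)    # %2958
        return logits[batch, lengths]                                  # %2959/%2960: [batch,2]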
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86f0410) { | |
| %423 = "torch.aten.slice.Tensor"(%422, %306, %306, %309, %2) : (!torch.vtensor<[1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
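Note: everything from here on is the dialect-conversion driver visiting ops one by one and checking them against the ConversionTarget; "operation marked legal by the target" means this particular pass leaves the op alone, and the torch.aten.* ops are converted later in the same torch-backend-to-linalg-on-tensors pipeline. For context, a module like _lambda.mlir can also be produced and lowered from Python; a hedged sketch against the 2022-era torch_mlir API (the exact entry points vary by version, and the Linear model is a stand-in, not the traced BLOOM lambda):

    import torch
    import torch_mlir  # assumes a 2022-era torch-mlir Python install

    model = torch.nn.Linear(1024, 2).eval()
    module = torch_mlir.compile(
        model, torch.ones(1, 1024),
        output_type=torch_mlir.OutputType.LINALG_ON_TENSORS)
    print(module)  # linalg-on-tensors IR, past the legalization traced here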
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.unsqueeze'(0x86f0580) { | |
| %424 = "torch.aten.unsqueeze"(%423, %2) : (!torch.vtensor<[1,128],f32>, !torch.int) -> !torch.vtensor<[1,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86f0690) { | |
| %425 = "torch.aten.slice.Tensor"(%424, %307, %306, %309, %2) : (!torch.vtensor<[1,1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Scalar'(0x86f0800) { | |
| %426 = "torch.aten.sub.Scalar"(%425, %2, %2) : (!torch.vtensor<[1,1,128],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86f0930) { | |
| %427 = "torch.aten.slice.Tensor"(%403, %306, %306, %309, %2) : (!torch.vtensor<[1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.unsqueeze'(0x86f0aa0) { | |
| %428 = "torch.aten.unsqueeze"(%427, %2) : (!torch.vtensor<[1,128],f32>, !torch.int) -> !torch.vtensor<[1,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86f0bb0) { | |
| %429 = "torch.aten.mul.Tensor"(%426, %428) : (!torch.vtensor<[1,1,128],f32>, !torch.vtensor<[1,1,128],f32>) -> !torch.vtensor<[1,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.unsqueeze'(0x86f1d90) { | |
| %430 = "torch.aten.unsqueeze"(%407, %303) : (!torch.vtensor<[?],f32>, !torch.int) -> !torch.vtensor<[?,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86f1ea0) { | |
| %431 = "torch.aten.mul.Tensor"(%430, %429) : (!torch.vtensor<[?,1],f32>, !torch.vtensor<[1,1,128],f32>) -> !torch.vtensor<[1,?,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86f1fb0) { | |
| %432 = "torch.aten.slice.Tensor"(%403, %306, %306, %309, %2) : (!torch.vtensor<[1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.unsqueeze'(0x86f2120) { | |
| %433 = "torch.aten.unsqueeze"(%432, %2) : (!torch.vtensor<[1,128],f32>, !torch.int) -> !torch.vtensor<[1,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86f2230) { | |
| %434 = "torch.aten.mul.Tensor"(%431, %433) : (!torch.vtensor<[1,?,128],f32>, !torch.vtensor<[1,1,128],f32>) -> !torch.vtensor<[1,?,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f2340) { | |
| %435 = "torch.prim.ListConstruct"(%310, %2, %1) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f2470) { | |
| %436 = "torch.aten.view"(%434, %435) : (!torch.vtensor<[1,?,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f2580) { | |
| %437 = "torch.prim.ListConstruct"(%1, %1) : (!torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x86f2690) { | |
| %438 = "torch.aten.empty.memory_format"(%437, %300, %300, %380, %302, %300) : (!torch.list<int>, !torch.none, !torch.none, !torch.Device, !torch.bool, !torch.none) -> !torch.vtensor<[128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.fill.Scalar'(0x86f2820) { | |
| %439 = "torch.valsem.aten.fill.Scalar"(%438, %301) : (!torch.vtensor<[128,128],f32>, !torch.float) -> !torch.vtensor<[128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.arange.start_step'(0x86f2930) { | |
| %440 = "torch.aten.arange.start_step"(%306, %1, %2, %300, %300, %380, %302) : (!torch.int, !torch.int, !torch.int, !torch.none, !torch.none, !torch.Device, !torch.bool) -> !torch.vtensor<[?],si64> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86f2ae0) { | |
| %441 = "torch.aten.add.Scalar"(%440, %2, %2) : (!torch.vtensor<[?],si64>, !torch.int, !torch.int) -> !torch.vtensor<[?],si64> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f2c10) { | |
| %442 = "torch.prim.ListConstruct"(%1, %2) : (!torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f2d20) { | |
| %443 = "torch.aten.view"(%441, %442) : (!torch.vtensor<[?],si64>, !torch.list<int>) -> !torch.vtensor<[128,1],si64> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.lt.Tensor'(0x86f2e30) { | |
| %444 = "torch.aten.lt.Tensor"(%440, %443) : (!torch.vtensor<[?],si64>, !torch.vtensor<[128,1],si64>) -> !torch.vtensor<[128,?],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.masked_fill.Scalar'(0x86e3650) { | |
| %445 = "torch.aten.masked_fill.Scalar"(%439, %444, %306) : (!torch.vtensor<[128,128],f32>, !torch.vtensor<[128,?],i1>, !torch.int) -> !torch.vtensor<[128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
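Note: %438-%445 above rebuild a causal mask the way HF's _make_causal_mask does: allocate a [128,128] buffer, fill it with the dtype minimum (%301), compare arange against (arange+1) reshaped to a column, and zero the lower triangle. Sketch:

    import torch

    def make_causal_mask(n=128, min_value=torch.finfo(torch.float32).min):
        mask = torch.full((n, n), min_value)                  # %438/%439
        cond = torch.arange(n)                                # %440
        mask.masked_fill_(cond < (cond + 1).view(n, 1), 0.0)  # %441-%445
        return mask  # zero on/below the diagonal, min_value above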
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.unsqueeze'(0x86e3780) { | |
| %446 = "torch.aten.unsqueeze"(%445, %306) : (!torch.vtensor<[128,128],f32>, !torch.int) -> !torch.vtensor<[1,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.unsqueeze'(0x86e3890) { | |
| %447 = "torch.aten.unsqueeze"(%446, %2) : (!torch.vtensor<[1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,1,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86e39a0) { | |
| %448 = "torch.aten.slice.Tensor"(%447, %307, %306, %309, %2) : (!torch.vtensor<[1,1,128,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,1,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86e3b10) { | |
| %449 = "torch.aten.slice.Tensor"(%448, %311, %306, %309, %2) : (!torch.vtensor<[1,1,128,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,1,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86eedb0) { | |
| %450 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86f5700) { | |
| %451 = "torch.aten.broadcast_to"(%449, %450) : (!torch.vtensor<[1,1,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,1,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86f58c0) { | |
| %452 = "torch.aten.slice.Tensor"(%403, %306, %306, %309, %2) : (!torch.vtensor<[1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.unsqueeze'(0x86f5a30) { | |
| %453 = "torch.aten.unsqueeze"(%452, %2) : (!torch.vtensor<[1,128],f32>, !torch.int) -> !torch.vtensor<[1,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.unsqueeze'(0x86f5b40) { | |
| %454 = "torch.aten.unsqueeze"(%453, %307) : (!torch.vtensor<[1,1,128],f32>, !torch.int) -> !torch.vtensor<[1,1,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86f5c50) { | |
| %455 = "torch.aten.slice.Tensor"(%454, %311, %306, %309, %2) : (!torch.vtensor<[1,1,1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,1,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86f5dc0) { | |
| %456 = "torch.aten.broadcast_to"(%455, %450) : (!torch.vtensor<[1,1,1,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,1,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsub.Scalar'(0x86f5ed0) { | |
| %457 = "torch.aten.rsub.Scalar"(%456, %312, %2) : (!torch.vtensor<[1,1,128,128],f32>, !torch.float, !torch.int) -> !torch.vtensor<[1,1,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f57d0) { | |
| %458 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x86f6960) { | |
| %459 = "torch.aten.empty.memory_format"(%458, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x86f6af0) { | |
| %460 = "torch.valsem.aten.copy"(%459, %457, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.masked_fill.Scalar'(0x86f6c20) { | |
| %461 = "torch.aten.masked_fill.Scalar"(%457, %460, %301) : (!torch.vtensor<[1,1,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>, !torch.float) -> !torch.vtensor<[1,1,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86f6d50) { | |
| %462 = "torch.aten.add.Tensor"(%461, %451, %2) : (!torch.vtensor<[1,1,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,1,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
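Note: %452-%462 expand the 2-D padding mask to [1,1,128,128] and invert it: rsub computes 1.0 - mask, the nonzero (padded) positions become a bool mask, masked_fill writes the dtype minimum (%301) there, and %462 adds the result to the broadcast causal mask %451 to form the combined attention bias. Sketch (hypothetical helper, shapes from the IR):

    import torch

    def expand_padding_mask(mask_2d, min_value, n=128):
        # mask_2d: [1,n] float, 1.0 for real tokens, 0.0 for padding (%403)
        inv = 1.0 - mask_2d[:, None, None, :].expand(1, 1, n, n)   # %452-%457
        return inv.masked_fill(inv.to(torch.bool), min_value)      # %459-%461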
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f6e80) { | |
| %463 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86f6870) { | |
| %464 = "torch.aten.sum.dim_IntList"(%401, %463, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86f6fd0) { | |
| %465 = "torch.aten.div.Scalar"(%464, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86f70e0) { | |
| %466 = "torch.aten.size.int"(%401, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f71f0) { | |
| %467 = "torch.prim.ListConstruct"(%466, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86f7320) { | |
| %468 = "torch.aten.broadcast_to"(%465, %467) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x86f7430) { | |
| %469 = "torch.aten.sub.Tensor"(%401, %468, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86f7560) { | |
| %470 = "torch.aten.mul.Tensor"(%469, %469) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86f7670) { | |
| %471 = "torch.aten.sum.dim_IntList"(%470, %463, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86f7fd0) { | |
| %472 = "torch.aten.div.Scalar"(%471, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86f80e0) { | |
| %473 = "torch.aten.add.Scalar"(%472, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x86f8210) { | |
| %474 = "torch.aten.rsqrt"(%473) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86f8300) { | |
| %475 = "torch.aten.size.int"(%401, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f8410) { | |
| %476 = "torch.prim.ListConstruct"(%475, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86f8540) { | |
| %477 = "torch.aten.broadcast_to"(%474, %476) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86f8650) { | |
| %478 = "torch.aten.mul.Tensor"(%469, %477) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86f8760) { | |
| %479 = "torch.aten.mul.Tensor"(%478, %295) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86f8870) { | |
| %480 = "torch.aten.add.Tensor"(%479, %294, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86f89a0) { | |
| %481 = "torch.aten.transpose.int"(%293, %306, %2) : (!torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f8ad0) { | |
| %482 = "torch.prim.ListConstruct"(%1, %0) : (!torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f8be0) { | |
| %483 = "torch.aten.view"(%480, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x86f8cf0) { | |
| %484 = "torch.aten.mm"(%483, %481) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32>) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86f8e00) { | |
| %485 = "torch.aten.mul.Scalar"(%292, %2) : (!torch.vtensor<[3072],f32>, !torch.int) -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86f8f10) { | |
| %486 = "torch.aten.add.Tensor"(%485, %484, %2) : (!torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f9040) { | |
| %487 = "torch.prim.ListConstruct"(%2, %1, %313) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f9170) { | |
| %488 = "torch.aten.view"(%486, %487) : (!torch.vtensor<[128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8675df0) { | |
| %489 = "torch.prim.ListConstruct"(%2, %1, %310, %314) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f92e0) { | |
| %490 = "torch.aten.view"(%488, %489) : (!torch.vtensor<[1,128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,192],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86f94e0) { | |
| %491 = "torch.aten.slice.Tensor"(%490, %303, %306, %315, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86f9740) { | |
| %492 = "torch.aten.slice.Tensor"(%490, %303, %315, %1, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86f98b0) { | |
| %493 = "torch.aten.slice.Tensor"(%490, %303, %1, %314, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86f9a20) { | |
| %494 = "torch.aten.transpose.int"(%491, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f9b50) { | |
| %495 = "torch.prim.ListConstruct"(%310, %1, %315) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f9c80) { | |
| %496 = "torch.aten.view"(%494, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f9650) { | |
| %497 = "torch.prim.ListConstruct"(%306, %307, %311, %2) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x86f9df0) { | |
| %498 = "torch.aten.permute"(%492, %497) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f9f00) { | |
| %499 = "torch.prim.ListConstruct"(%310, %315, %1) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fa030) { | |
| %500 = "torch.aten.view"(%498, %499) : (!torch.vtensor<[1,16,64,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x86fa140) { | |
| %501 = "torch.aten.bmm"(%496, %500) : (!torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86fa250) { | |
| %502 = "torch.aten.mul.Scalar"(%501, %316) : (!torch.vtensor<[16,128,128],f32>, !torch.float) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86fa360) { | |
| %503 = "torch.aten.mul.Scalar"(%436, %312) : (!torch.vtensor<[16,1,128],f32>, !torch.float) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86fa470) { | |
| %504 = "torch.aten.add.Tensor"(%502, %503, %2) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f93f0) { | |
| %505 = "torch.prim.ListConstruct"(%303, %310, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fa600) { | |
| %506 = "torch.aten.view"(%504, %505) : (!torch.vtensor<[16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86fa710) { | |
| %507 = "torch.aten.mul.Scalar"(%506, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86fa820) { | |
| %508 = "torch.aten.add.Tensor"(%507, %462, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x86fa950) { | |
| %509 = "torch.aten.clone"(%291, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.maximum'(0x86faa60) { | |
| %510 = "torch.aten.maximum"(%508, %509) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.max.dim'(0x85a2020) { | |
| %511:2 = "torch.aten.max.dim"(%510, %303, %3) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool) -> (!torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>) | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x86faca0) { | |
| %512 = "torch.aten.sub.Tensor"(%510, %511#0, %312) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.exp'(0x86fadd0) { | |
| %513 = "torch.aten.exp"(%512) : (!torch.vtensor<[1,16,128,128],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86faec0) { | |
| %514 = "torch.prim.ListConstruct"(%303) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86fafb0) { | |
| %515 = "torch.aten.sum.dim_IntList"(%513, %514, %3, %300) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[1,16,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Tensor'(0x86fb100) { | |
| %516 = "torch.aten.div.Tensor"(%513, %515) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8675c50) { | |
| %517 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x86fb360) { | |
| %518 = "torch.aten.empty.memory_format"(%517, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x86fb4f0) { | |
| %519 = "torch.valsem.aten.copy"(%518, %462, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bitwise_not'(0x86fb620) { | |
| %520 = "torch.aten.bitwise_not"(%519) : (!torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86fb710) { | |
| %521 = "torch.aten.mul.Tensor"(%516, %520) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
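
Annotation: the four ops above (%518 empty.memory_format with dtype i1, %519 valsem.aten.copy, %520 bitwise_not, %521 mul.Tensor) appear to re-materialize the [1,1,128,128] mask tensor %462 as a boolean tensor and zero out the softmax output at the masked positions, i.e. the usual decomposition of a masked fill with 0.0. A hedged PyTorch sketch, assuming %462 holds nonzero values at the positions to be masked (names and the example mask region are mine):

    import torch

    probs = torch.rand(1, 16, 128, 128)                   # %516: softmax output
    mask_f32 = torch.zeros(1, 1, 128, 128)                # %462: mask tensor (assumed 0/1-valued)
    mask_f32[..., 64:] = 1.0                              # hypothetical masked region

    buf = torch.empty(1, 1, 128, 128, dtype=torch.bool)   # %518 = aten.empty.memory_format (i1)
    mask_bool = buf.copy_(mask_f32)                       # %519 = valsem.aten.copy (f32 -> i1 cast)
    keep = mask_bool.bitwise_not()                        # %520 = aten.bitwise_not
    masked = probs * keep                                 # %521 = aten.mul.Tensor (bool acts as 0/1)
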
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86fb820) { | |
| %522 = "torch.prim.ListConstruct"(%310, %1, %1) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fb950) { | |
| %523 = "torch.aten.view"(%521, %522) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86fba60) { | |
| %524 = "torch.aten.transpose.int"(%493, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fbb90) { | |
| %525 = "torch.aten.view"(%524, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x86fbca0) { | |
| %526 = "torch.aten.bmm"(%523, %525) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86fb270) { | |
| %527 = "torch.prim.ListConstruct"(%2, %310, %1, %315) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fbe10) { | |
| %528 = "torch.aten.view"(%526, %527) : (!torch.vtensor<[16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
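
Annotation: ops %523 through %528 compute the attention context. The [1,16,128,128] probabilities and the [1,128,16,64] value tensor %493 are both flattened to a batch of 16 matrices so a single bmm can perform the per-head probs @ V product, then the result is unflattened. Sketch in PyTorch (reshape stands in for aten.view where the transposed value tensor is non-contiguous in eager mode):

    import torch

    probs = torch.rand(1, 16, 128, 128)            # %521: masked attention probabilities
    v = torch.randn(1, 128, 16, 64)                # %493: value tensor from the earlier QKV slice

    probs_b = probs.view(16, 128, 128)             # %523 = aten.view
    v_b = v.transpose(1, 2).reshape(16, 128, 64)   # %524, %525: [1,16,128,64], then flatten
    ctx = torch.bmm(probs_b, v_b)                  # %526 = aten.bmm -> [16, 128, 64]
    ctx = ctx.view(1, 16, 128, 64)                 # %528 = aten.view
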
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8653550) { | |
| %529 = "torch.prim.ListConstruct"(%306, %307, %2, %311) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x86fbf80) { | |
| %530 = "torch.aten.permute"(%528, %529) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x86fc090) { | |
| %531 = "torch.aten.clone"(%530, %306) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86fc1a0) { | |
| %532 = "torch.prim.ListConstruct"(%2, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fc2d0) { | |
| %533 = "torch.aten.view"(%531, %532) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
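
Annotation: ops %530 to %533 merge the attention heads back into the hidden dimension: permute to [1,128,16,64], clone into contiguous layout (memory_format constant 0, i.e. torch.contiguous_format), then view as [1,128,1024], since 16 * 64 = 1024. In PyTorch terms:

    import torch

    ctx = torch.randn(1, 16, 128, 64)                             # %528: per-head context
    merged = ctx.permute(0, 2, 1, 3)                              # %530 = aten.permute([0,2,1,3])
    merged = merged.clone(memory_format=torch.contiguous_format)  # %531 = aten.clone
    merged = merged.view(1, 128, 1024)                            # %533 = aten.view
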
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86fc3e0) { | |
| %534 = "torch.aten.transpose.int"(%290, %306, %2) : (!torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fc510) { | |
| %535 = "torch.aten.view"(%533, %482) : (!torch.vtensor<[1,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x86fc620) { | |
| %536 = "torch.aten.mm"(%535, %534) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86fc730) { | |
| %537 = "torch.aten.mul.Scalar"(%289, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86fc840) { | |
| %538 = "torch.aten.add.Tensor"(%537, %536, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fc970) { | |
| %539 = "torch.aten.view"(%538, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86fca80) { | |
| %540 = "torch.aten.add.Tensor"(%401, %539, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
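
Annotation: ops %534 through %540 are the attention output projection plus the residual connection. The mul.Scalar of the bias by 1 followed by add.Tensor is most likely what the addmm decomposition emits for out = beta*bias + mat1 @ mat2 with beta = alpha = 1. A sketch, with hypothetical weight/bias names:

    import torch

    hidden = torch.randn(1, 128, 1024)     # %533: merged attention output
    weight = torch.randn(1024, 1024)       # %290: projection weight (hypothetical name)
    bias = torch.randn(1024)               # %289: projection bias (hypothetical name)
    residual = torch.randn(1, 128, 1024)   # %401: residual stream ([?,128,1024] in the log)

    y = bias * 1 + hidden.view(128, 1024).mm(weight.t())   # %534..%538: addmm decomposition
    out = residual + y.view(1, 128, 1024)                  # %539, %540: view + residual add
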
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86fcbb0) { | |
| %541 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86fcca0) { | |
| %542 = "torch.aten.sum.dim_IntList"(%540, %541, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86fcdf0) { | |
| %543 = "torch.aten.div.Scalar"(%542, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86fcf00) { | |
| %544 = "torch.aten.size.int"(%540, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86fd010) { | |
| %545 = "torch.prim.ListConstruct"(%544, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86fd140) { | |
| %546 = "torch.aten.broadcast_to"(%543, %545) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x86fd250) { | |
| %547 = "torch.aten.sub.Tensor"(%540, %546, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86fd380) { | |
| %548 = "torch.aten.mul.Tensor"(%547, %547) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86fd490) { | |
| %549 = "torch.aten.sum.dim_IntList"(%548, %541, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86fd5e0) { | |
| %550 = "torch.aten.div.Scalar"(%549, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86fd6f0) { | |
| %551 = "torch.aten.add.Scalar"(%550, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x86fd820) { | |
| %552 = "torch.aten.rsqrt"(%551) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86fd910) { | |
| %553 = "torch.aten.size.int"(%540, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86fda20) { | |
| %554 = "torch.prim.ListConstruct"(%553, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86fdb50) { | |
| %555 = "torch.aten.broadcast_to"(%552, %554) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86fdc60) { | |
| %556 = "torch.aten.mul.Tensor"(%547, %555) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86fdd70) { | |
| %557 = "torch.aten.mul.Tensor"(%556, %288) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86fde80) { | |
| %558 = "torch.aten.add.Tensor"(%557, %287, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
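
Annotation: ops %542 through %558 are a fully decomposed layer norm over the 1024-wide hidden dimension: mean via sum/1024, centering, biased variance via sum of squares/1024, add-epsilon, rsqrt, then the affine weight/bias %288/%287. A PyTorch sketch that checks the decomposition against F.layer_norm; the epsilon constant %305 is defined earlier in the log, 1e-5 is assumed here:

    import torch

    x = torch.randn(2, 128, 1024)          # %540: residual stream ([?,128,1024] in the log)
    weight = torch.randn(1024)             # %288: layernorm weight
    bias = torch.randn(1024)               # %287: layernorm bias
    eps = 1e-5                             # %305: epsilon (assumed value)

    mean = x.sum(dim=2, keepdim=True) / 1024                       # %542, %543
    centered = x - mean.expand(-1, -1, 1024)                       # %546, %547
    var = (centered * centered).sum(dim=2, keepdim=True) / 1024    # %548..%550: biased variance
    inv_std = (var + eps).rsqrt()                                  # %551, %552
    y = centered * inv_std.expand(-1, -1, 1024)                    # %555, %556
    y = y * weight + bias                                          # %557, %558: affine transform

    ref = torch.nn.functional.layer_norm(x, (1024,), weight, bias, eps)
    assert torch.allclose(y, ref, atol=1e-4)
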
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86fdfb0) { | |
| %559 = "torch.aten.transpose.int"(%286, %306, %2) : (!torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fe0e0) { | |
| %560 = "torch.aten.view"(%558, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x86fe1f0) { | |
| %561 = "torch.aten.mm"(%560, %559) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86fe300) { | |
| %562 = "torch.aten.mul.Scalar"(%285, %2) : (!torch.vtensor<[4096],f32>, !torch.int) -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86fe410) { | |
| %563 = "torch.aten.add.Tensor"(%562, %561, %2) : (!torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86fe540) { | |
| %564 = "torch.prim.ListConstruct"(%2, %1, %317) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fe670) { | |
| %565 = "torch.aten.view"(%563, %564) : (!torch.vtensor<[128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86fe780) { | |
| %566 = "torch.aten.mul.Scalar"(%565, %318) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86fe890) { | |
| %567 = "torch.aten.mul.Scalar"(%565, %319) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86fe9a0) { | |
| %568 = "torch.aten.mul.Scalar"(%565, %320) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86feab0) { | |
| %569 = "torch.aten.mul.Tensor"(%568, %565) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86febc0) { | |
| %570 = "torch.aten.add.Scalar"(%569, %2, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86fecf0) { | |
| %571 = "torch.aten.mul.Tensor"(%567, %570) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.tanh'(0x86fee00) { | |
| %572 = "torch.aten.tanh"(%571) : (!torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86feef0) { | |
| %573 = "torch.aten.add.Scalar"(%572, %312, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86ff020) { | |
| %574 = "torch.aten.mul.Tensor"(%566, %573) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
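
Annotation: ops %566 through %574 are the tanh-approximation GELU used by BLOOM, decomposed into scalar multiplies: 0.5*x * (1 + tanh(c1*x*(1 + c2*x^2))). The constants %318/%319/%320 are defined earlier in the log; the values below (0.5, sqrt(2/pi), 0.044715) are the usual ones and are assumed:

    import math
    import torch

    x = torch.randn(1, 128, 4096)          # %565: MLP hidden activations
    c0 = 0.5                               # %318 (assumed)
    c1 = math.sqrt(2.0 / math.pi)          # %319 (assumed, ~0.79788456)
    c2 = 0.044715                          # %320 (assumed)

    a = x * c0                             # %566 = aten.mul.Scalar
    b = x * c1                             # %567
    c = x * c2                             # %568
    inner = b * (c * x + 1)                # %569..%571: (c1*x) * (1 + c2*x^2)
    y = a * (inner.tanh() + 1.0)           # %572..%574: 0.5*x * (1 + tanh(...))

    # matches the built-in tanh approximation (PyTorch >= 1.12)
    assert torch.allclose(y, torch.nn.functional.gelu(x, approximate="tanh"), atol=1e-5)
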
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86ff130) { | |
| %575 = "torch.aten.transpose.int"(%284, %306, %2) : (!torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f2f40) { | |
| %576 = "torch.prim.ListConstruct"(%1, %317) : (!torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f3050) { | |
| %577 = "torch.aten.view"(%574, %576) : (!torch.vtensor<[1,128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x86f3160) { | |
| %578 = "torch.aten.mm"(%577, %575) : (!torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86f3270) { | |
| %579 = "torch.aten.mul.Scalar"(%283, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86f3380) { | |
| %580 = "torch.aten.add.Tensor"(%579, %578, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f34b0) { | |
| %581 = "torch.aten.view"(%580, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86f35c0) { | |
| %582 = "torch.aten.add.Tensor"(%540, %581, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
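
Annotation: taken together, %559 through %582 are one full MLP block: up-projection 1024 -> 4096, tanh-GELU, down-projection 4096 -> 1024, then the residual add into %540. A compact sketch; the BLOOM-style weight names dense_h_to_4h / dense_4h_to_h are my labels, not from the log:

    import torch

    hidden = torch.randn(1, 128, 1024)     # %558: post-layernorm activations
    w_in = torch.randn(4096, 1024)         # %286: dense_h_to_4h weight (my label)
    b_in = torch.randn(4096)               # %285: its bias
    w_out = torch.randn(1024, 4096)        # %284: dense_4h_to_h weight (my label)
    b_out = torch.randn(1024)              # %283: its bias
    residual = torch.randn(1, 128, 1024)   # %540: pre-MLP residual stream

    up = b_in + hidden.view(128, 1024).mm(w_in.t())          # %559..%563
    act = torch.nn.functional.gelu(up.view(1, 128, 4096),    # %565..%574 (decomposed above)
                                   approximate="tanh")
    down = b_out + act.view(128, 4096).mm(w_out.t())         # %575..%580
    out = residual + down.view(1, 128, 1024)                 # %581, %582
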
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f36f0) { | |
| %583 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86f37e0) { | |
| %584 = "torch.aten.sum.dim_IntList"(%582, %583, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86f3930) { | |
| %585 = "torch.aten.div.Scalar"(%584, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86f3a40) { | |
| %586 = "torch.aten.size.int"(%582, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f3b50) { | |
| %587 = "torch.prim.ListConstruct"(%586, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86f3c80) { | |
| %588 = "torch.aten.broadcast_to"(%585, %587) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x86f3d90) { | |
| %589 = "torch.aten.sub.Tensor"(%582, %588, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86f3ec0) { | |
| %590 = "torch.aten.mul.Tensor"(%589, %589) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86f3fd0) { | |
| %591 = "torch.aten.sum.dim_IntList"(%590, %583, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86f4120) { | |
| %592 = "torch.aten.div.Scalar"(%591, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86f4230) { | |
| %593 = "torch.aten.add.Scalar"(%592, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x86f4360) { | |
| %594 = "torch.aten.rsqrt"(%593) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86f4450) { | |
| %595 = "torch.aten.size.int"(%582, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86f4560) { | |
| %596 = "torch.prim.ListConstruct"(%595, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8702210) { | |
| %597 = "torch.aten.broadcast_to"(%594, %596) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87022c0) { | |
| %598 = "torch.aten.mul.Tensor"(%589, %597) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87023d0) { | |
| %599 = "torch.aten.mul.Tensor"(%598, %282) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x87024e0) { | |
| %600 = "torch.aten.add.Tensor"(%599, %281, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8702610) { | |
| %601 = "torch.aten.transpose.int"(%280, %306, %2) : (!torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8702740) { | |
| %602 = "torch.aten.view"(%600, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8702850) { | |
| %603 = "torch.aten.mm"(%602, %601) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32>) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8702960) { | |
| %604 = "torch.aten.mul.Scalar"(%279, %2) : (!torch.vtensor<[3072],f32>, !torch.int) -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8702a70) { | |
| %605 = "torch.aten.add.Tensor"(%604, %603, %2) : (!torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8702ba0) { | |
| %606 = "torch.aten.view"(%605, %487) : (!torch.vtensor<[128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8702cb0) { | |
| %607 = "torch.aten.view"(%606, %489) : (!torch.vtensor<[1,128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,192],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8702eb0) { | |
| %608 = "torch.aten.slice.Tensor"(%607, %303, %306, %315, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8703110) { | |
| %609 = "torch.aten.slice.Tensor"(%607, %303, %315, %1, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8703370) { | |
| %610 = "torch.aten.slice.Tensor"(%607, %303, %1, %314, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
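
Annotation: ops %601 through %610 are the fused QKV projection of the next attention block: one [1024,3072] matmul, a view to [1,128,16,192], and three slices of width 64 along the last dimension. Note the per-head interleaved layout (each head's q, k, v sit next to each other), which is why the slices land at 0:64, 64:128, 128:192 rather than at thirds of 3072. Sketch:

    import torch

    hidden = torch.randn(1, 128, 1024)     # %600: post-layernorm activations
    w_qkv = torch.randn(3072, 1024)        # %280: fused QKV weight (hypothetical name)
    b_qkv = torch.randn(3072)              # %279: fused QKV bias

    fused = b_qkv + hidden.view(128, 1024).mm(w_qkv.t())   # %601..%605
    fused = fused.view(1, 128, 16, 192)                    # %606, %607: 16 heads x (3 * 64)
    q = fused[..., 0:64]                                   # %608 = aten.slice.Tensor
    k = fused[..., 64:128]                                 # %609
    v = fused[..., 128:192]                                # %610
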
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x87034e0) { | |
| %611 = "torch.aten.transpose.int"(%608, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8703610) { | |
| %612 = "torch.aten.view"(%611, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x8703720) { | |
| %613 = "torch.aten.permute"(%609, %497) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8703830) { | |
| %614 = "torch.aten.view"(%613, %499) : (!torch.vtensor<[1,16,64,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x8703940) { | |
| %615 = "torch.aten.bmm"(%612, %614) : (!torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8703a50) { | |
| %616 = "torch.aten.mul.Scalar"(%615, %316) : (!torch.vtensor<[16,128,128],f32>, !torch.float) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8703b60) { | |
| %617 = "torch.aten.mul.Scalar"(%436, %312) : (!torch.vtensor<[16,1,128],f32>, !torch.float) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8703c70) { | |
| %618 = "torch.aten.add.Tensor"(%616, %617, %2) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8703da0) { | |
| %619 = "torch.aten.view"(%618, %505) : (!torch.vtensor<[16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8703eb0) { | |
| %620 = "torch.aten.mul.Scalar"(%619, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8703fc0) { | |
| %621 = "torch.aten.add.Tensor"(%620, %462, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
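
Annotation: ops %611 through %621 build the raw attention scores: q @ k^T per head via bmm, scaling by %316 (presumably 1/sqrt(64) = 0.125), adding the [16,1,128] ALiBi bias %436 scaled by 1.0 (which looks like the beta term of a decomposed baddbmm), and finally adding the additive mask %462. A sketch under those assumptions; the permutation list %497 is taken to be [0,2,3,1] from the shapes in the log:

    import torch

    q = torch.randn(1, 128, 16, 64)        # %608: query slice
    k = torch.randn(1, 128, 16, 64)        # %609: key slice
    alibi = torch.randn(16, 1, 128)        # %436: per-head ALiBi bias (from earlier in the log)
    mask = torch.zeros(1, 1, 128, 128)     # %462: additive attention mask (assumed)
    scale = 1.0 / 64 ** 0.5                # %316: assumed 1/sqrt(head_dim) = 0.125

    q_b = q.transpose(1, 2).reshape(16, 128, 64)       # %611, %612
    k_b = k.permute(0, 2, 3, 1).reshape(16, 64, 128)   # %613, %614: perm taken as [0,2,3,1]
    scores = torch.bmm(q_b, k_b) * scale               # %615, %616
    scores = scores + alibi * 1.0                      # %617, %618: beta * alibi of a baddbmm
    scores = scores.view(1, 16, 128, 128) * 1          # %619, %620
    scores = scores + mask                             # %621: additive mask

The remaining blocks of the section (the second aten.maximum clamp, max.dim/sub/exp/sum/div softmax, mask re-materialization, and probs @ V bmm for %622 through %649 and beyond) repeat the same patterns sketched above for the previous layer.
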
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x87040f0) { | |
| %622 = "torch.aten.clone"(%291, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.maximum'(0x8704200) { | |
| %623 = "torch.aten.maximum"(%621, %622) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.max.dim'(0x85a3fb0) { | |
| %624:2 = "torch.aten.max.dim"(%623, %303, %3) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool) -> (!torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>) | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x87043d0) { | |
| %625 = "torch.aten.sub.Tensor"(%623, %624#0, %312) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.exp'(0x8704500) { | |
| %626 = "torch.aten.exp"(%625) : (!torch.vtensor<[1,16,128,128],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87045f0) { | |
| %627 = "torch.prim.ListConstruct"(%303) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x87046e0) { | |
| %628 = "torch.aten.sum.dim_IntList"(%626, %627, %3, %300) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[1,16,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Tensor'(0x8704830) { | |
| %629 = "torch.aten.div.Tensor"(%626, %628) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8703280) { | |
| %630 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x8704a90) { | |
| %631 = "torch.aten.empty.memory_format"(%630, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x8704c20) { | |
| %632 = "torch.valsem.aten.copy"(%631, %462, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bitwise_not'(0x8704d50) { | |
| %633 = "torch.aten.bitwise_not"(%632) : (!torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8704e40) { | |
| %634 = "torch.aten.mul.Tensor"(%629, %633) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8704f50) { | |
| %635 = "torch.aten.view"(%634, %522) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8705060) { | |
| %636 = "torch.aten.transpose.int"(%610, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8705190) { | |
| %637 = "torch.aten.view"(%636, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x87052a0) { | |
| %638 = "torch.aten.bmm"(%635, %637) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87053b0) { | |
| %639 = "torch.aten.view"(%638, %527) : (!torch.vtensor<[16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x87054c0) { | |
| %640 = "torch.aten.permute"(%639, %529) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x87055d0) { | |
| %641 = "torch.aten.clone"(%640, %306) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87056e0) { | |
| %642 = "torch.aten.view"(%641, %532) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x87057f0) { | |
| %643 = "torch.aten.transpose.int"(%278, %306, %2) : (!torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8705920) { | |
| %644 = "torch.aten.view"(%642, %482) : (!torch.vtensor<[1,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8705a30) { | |
| %645 = "torch.aten.mm"(%644, %643) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8705b40) { | |
| %646 = "torch.aten.mul.Scalar"(%277, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8705c50) { | |
| %647 = "torch.aten.add.Tensor"(%646, %645, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8705d80) { | |
| %648 = "torch.aten.view"(%647, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8705e90) { | |
| %649 = "torch.aten.add.Tensor"(%582, %648, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8705fc0) { | |
| %650 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x87068c0) { | |
| %651 = "torch.aten.sum.dim_IntList"(%649, %650, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8706a10) { | |
| %652 = "torch.aten.div.Scalar"(%651, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8706b20) { | |
| %653 = "torch.aten.size.int"(%649, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8706c30) { | |
| %654 = "torch.prim.ListConstruct"(%653, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86df530) { | |
| %655 = "torch.aten.broadcast_to"(%652, %654) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x86df640) { | |
| %656 = "torch.aten.sub.Tensor"(%649, %655, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86df770) { | |
| %657 = "torch.aten.mul.Tensor"(%656, %656) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86df880) { | |
| %658 = "torch.aten.sum.dim_IntList"(%657, %650, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86df9d0) { | |
| %659 = "torch.aten.div.Scalar"(%658, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86dfae0) { | |
| %660 = "torch.aten.add.Scalar"(%659, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x86dfc10) { | |
| %661 = "torch.aten.rsqrt"(%660) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8707d70) { | |
| %662 = "torch.aten.size.int"(%649, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8707e40) { | |
| %663 = "torch.prim.ListConstruct"(%662, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8707f70) { | |
| %664 = "torch.aten.broadcast_to"(%661, %663) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8708890) { | |
| %665 = "torch.aten.mul.Tensor"(%656, %664) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87089a0) { | |
| %666 = "torch.aten.mul.Tensor"(%665, %276) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8708ab0) { | |
| %667 = "torch.aten.add.Tensor"(%666, %275, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x87093f0) { | |
| %668 = "torch.aten.transpose.int"(%274, %306, %2) : (!torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8709520) { | |
| %669 = "torch.aten.view"(%667, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8709630) { | |
| %670 = "torch.aten.mm"(%669, %668) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8709740) { | |
| %671 = "torch.aten.mul.Scalar"(%273, %2) : (!torch.vtensor<[4096],f32>, !torch.int) -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8709850) { | |
| %672 = "torch.aten.add.Tensor"(%671, %670, %2) : (!torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8709980) { | |
| %673 = "torch.aten.view"(%672, %564) : (!torch.vtensor<[128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8709a90) { | |
| %674 = "torch.aten.mul.Scalar"(%673, %318) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8709ba0) { | |
| %675 = "torch.aten.mul.Scalar"(%673, %319) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8709cb0) { | |
| %676 = "torch.aten.mul.Scalar"(%673, %320) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8709dc0) { | |
| %677 = "torch.aten.mul.Tensor"(%676, %673) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8709ed0) { | |
| %678 = "torch.aten.add.Scalar"(%677, %2, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x870a000) { | |
| %679 = "torch.aten.mul.Tensor"(%675, %678) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.tanh'(0x870a110) { | |
| %680 = "torch.aten.tanh"(%679) : (!torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x870a200) { | |
| %681 = "torch.aten.add.Scalar"(%680, %312, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x870a330) { | |
| %682 = "torch.aten.mul.Tensor"(%674, %681) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x870a440) { | |
| %683 = "torch.aten.transpose.int"(%272, %306, %2) : (!torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870ad80) { | |
| %684 = "torch.aten.view"(%682, %576) : (!torch.vtensor<[1,128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x870ae90) { | |
| %685 = "torch.aten.mm"(%684, %683) : (!torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x870afa0) { | |
| %686 = "torch.aten.mul.Scalar"(%271, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870b0b0) { | |
| %687 = "torch.aten.add.Tensor"(%686, %685, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870b1e0) { | |
| %688 = "torch.aten.view"(%687, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870b2f0) { | |
| %689 = "torch.aten.add.Tensor"(%649, %688, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x870b420) { | |
| %690 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x870b510) { | |
| %691 = "torch.aten.sum.dim_IntList"(%689, %690, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x870b660) { | |
| %692 = "torch.aten.div.Scalar"(%691, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x870b770) { | |
| %693 = "torch.aten.size.int"(%689, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x870b880) { | |
| %694 = "torch.prim.ListConstruct"(%693, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x870b9b0) { | |
| %695 = "torch.aten.broadcast_to"(%692, %694) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x870bac0) { | |
| %696 = "torch.aten.sub.Tensor"(%689, %695, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x870bbf0) { | |
| %697 = "torch.aten.mul.Tensor"(%696, %696) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x870bd00) { | |
| %698 = "torch.aten.sum.dim_IntList"(%697, %690, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x870be50) { | |
| %699 = "torch.aten.div.Scalar"(%698, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x870c770) { | |
| %700 = "torch.aten.add.Scalar"(%699, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x870c8a0) { | |
| %701 = "torch.aten.rsqrt"(%700) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x870c990) { | |
| %702 = "torch.aten.size.int"(%689, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x870caa0) { | |
| %703 = "torch.prim.ListConstruct"(%702, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x870cbd0) { | |
| %704 = "torch.aten.broadcast_to"(%701, %703) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x870cce0) { | |
| %705 = "torch.aten.mul.Tensor"(%696, %704) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x870cdf0) { | |
| %706 = "torch.aten.mul.Tensor"(%705, %270) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870cf00) { | |
| %707 = "torch.aten.add.Tensor"(%706, %269, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x870d030) { | |
| %708 = "torch.aten.transpose.int"(%268, %306, %2) : (!torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870d160) { | |
| %709 = "torch.aten.view"(%707, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x870d270) { | |
| %710 = "torch.aten.mm"(%709, %708) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32>) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x870d380) { | |
| %711 = "torch.aten.mul.Scalar"(%267, %2) : (!torch.vtensor<[3072],f32>, !torch.int) -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870d490) { | |
| %712 = "torch.aten.add.Tensor"(%711, %710, %2) : (!torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870d5c0) { | |
| %713 = "torch.aten.view"(%712, %487) : (!torch.vtensor<[128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870d6d0) { | |
| %714 = "torch.aten.view"(%713, %489) : (!torch.vtensor<[1,128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,192],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x870d8d0) { | |
| %715 = "torch.aten.slice.Tensor"(%714, %303, %306, %315, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x870da40) { | |
| %716 = "torch.aten.slice.Tensor"(%714, %303, %315, %1, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x870dbb0) { | |
| %717 = "torch.aten.slice.Tensor"(%714, %303, %1, %314, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x870dd20) { | |
| %718 = "torch.aten.transpose.int"(%715, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870de50) { | |
| %719 = "torch.aten.view"(%718, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x870df60) { | |
| %720 = "torch.aten.permute"(%716, %497) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870e070) { | |
| %721 = "torch.aten.view"(%720, %499) : (!torch.vtensor<[1,16,64,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x870e180) { | |
| %722 = "torch.aten.bmm"(%719, %721) : (!torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x870e290) { | |
| %723 = "torch.aten.mul.Scalar"(%722, %321) : (!torch.vtensor<[16,128,128],f32>, !torch.float) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x870e3a0) { | |
| %724 = "torch.aten.mul.Scalar"(%436, %318) : (!torch.vtensor<[16,1,128],f32>, !torch.float) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870e4b0) { | |
| %725 = "torch.aten.add.Tensor"(%723, %724, %2) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870e5e0) { | |
| %726 = "torch.aten.view"(%725, %505) : (!torch.vtensor<[16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x870e6f0) { | |
| %727 = "torch.aten.mul.Scalar"(%726, %307) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870e800) { | |
| %728 = "torch.aten.add.Tensor"(%727, %462, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x870e930) { | |
| %729 = "torch.aten.clone"(%291, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.maximum'(0x870ea40) { | |
| %730 = "torch.aten.maximum"(%728, %729) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.max.dim'(0x85a3770) { | |
| %731:2 = "torch.aten.max.dim"(%730, %303, %3) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool) -> (!torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>) | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x870ec10) { | |
| %732 = "torch.aten.sub.Tensor"(%730, %731#0, %312) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.exp'(0x870ed40) { | |
| %733 = "torch.aten.exp"(%732) : (!torch.vtensor<[1,16,128,128],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x870ee30) { | |
| %734 = "torch.prim.ListConstruct"(%303) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x870d7e0) { | |
| %735 = "torch.aten.sum.dim_IntList"(%733, %734, %3, %300) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[1,16,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Tensor'(0x870ef80) { | |
| %736 = "torch.aten.div.Tensor"(%733, %735) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87049a0) { | |
| %737 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x870f1e0) { | |
| %738 = "torch.aten.empty.memory_format"(%737, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x86cc0c0) { | |
| %739 = "torch.valsem.aten.copy"(%738, %462, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bitwise_not'(0x86cc1f0) { | |
| %740 = "torch.aten.bitwise_not"(%739) : (!torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86cc2e0) { | |
| %741 = "torch.aten.mul.Tensor"(%736, %740) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870fb20) { | |
| %742 = "torch.aten.view"(%741, %522) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x870fbd0) { | |
| %743 = "torch.aten.transpose.int"(%717, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870fce0) { | |
| %744 = "torch.aten.view"(%743, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x870fdf0) { | |
| %745 = "torch.aten.bmm"(%742, %744) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870ff00) { | |
| %746 = "torch.aten.view"(%745, %527) : (!torch.vtensor<[16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x8710010) { | |
| %747 = "torch.aten.permute"(%746, %529) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x8710120) { | |
| %748 = "torch.aten.clone"(%747, %306) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8710230) { | |
| %749 = "torch.aten.view"(%748, %532) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8710340) { | |
| %750 = "torch.aten.transpose.int"(%266, %306, %2) : (!torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x868fe40) { | |
| %751 = "torch.aten.view"(%749, %482) : (!torch.vtensor<[1,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x868ff50) { | |
| %752 = "torch.aten.mm"(%751, %750) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8690060) { | |
| %753 = "torch.aten.mul.Scalar"(%265, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86bd930) { | |
| %754 = "torch.aten.add.Tensor"(%753, %752, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86bda60) { | |
| %755 = "torch.aten.view"(%754, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86bdb70) { | |
| %756 = "torch.aten.add.Tensor"(%689, %755, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86bdca0) { | |
| %757 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86bdd90) { | |
| %758 = "torch.aten.sum.dim_IntList"(%756, %757, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86bdee0) { | |
| %759 = "torch.aten.div.Scalar"(%758, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86bdff0) { | |
| %760 = "torch.aten.size.int"(%756, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8711c30) { | |
| %761 = "torch.prim.ListConstruct"(%760, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8711d00) { | |
| %762 = "torch.aten.broadcast_to"(%759, %761) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8711db0) { | |
| %763 = "torch.aten.sub.Tensor"(%756, %762, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8711ee0) { | |
| %764 = "torch.aten.mul.Tensor"(%763, %763) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8711ff0) { | |
| %765 = "torch.aten.sum.dim_IntList"(%764, %757, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8712140) { | |
| %766 = "torch.aten.div.Scalar"(%765, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8712250) { | |
| %767 = "torch.aten.add.Scalar"(%766, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x86d0250) { | |
| %768 = "torch.aten.rsqrt"(%767) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86d0340) { | |
| %769 = "torch.aten.size.int"(%756, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86d0450) { | |
| %770 = "torch.prim.ListConstruct"(%769, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8712b30) { | |
| %771 = "torch.aten.broadcast_to"(%768, %770) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86e1a50) { | |
| %772 = "torch.aten.mul.Tensor"(%763, %771) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86e1b40) { | |
| %773 = "torch.aten.mul.Tensor"(%772, %264) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86e1c50) { | |
| %774 = "torch.aten.add.Tensor"(%773, %263, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86e1d80) { | |
| %775 = "torch.aten.transpose.int"(%262, %306, %2) : (!torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86e1eb0) { | |
| %776 = "torch.aten.view"(%774, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x86e1fc0) { | |
| %777 = "torch.aten.mm"(%776, %775) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86e20d0) { | |
| %778 = "torch.aten.mul.Scalar"(%261, %2) : (!torch.vtensor<[4096],f32>, !torch.int) -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86d2d50) { | |
| %779 = "torch.aten.add.Tensor"(%778, %777, %2) : (!torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86d2e80) { | |
| %780 = "torch.aten.view"(%779, %564) : (!torch.vtensor<[128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86d2f70) { | |
| %781 = "torch.aten.mul.Scalar"(%780, %318) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86d3080) { | |
| %782 = "torch.aten.mul.Scalar"(%780, %319) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86d3190) { | |
| %783 = "torch.aten.mul.Scalar"(%780, %320) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86d32a0) { | |
| %784 = "torch.aten.mul.Tensor"(%783, %780) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86d33b0) { | |
| %785 = "torch.aten.add.Scalar"(%784, %2, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86e8560) { | |
| %786 = "torch.aten.mul.Tensor"(%782, %785) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.tanh'(0x86e8670) { | |
| %787 = "torch.aten.tanh"(%786) : (!torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86e8760) { | |
| %788 = "torch.aten.add.Scalar"(%787, %312, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86e98a0) { | |
| %789 = "torch.aten.mul.Tensor"(%781, %788) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86e99b0) { | |
| %790 = "torch.aten.transpose.int"(%260, %306, %2) : (!torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86e9ae0) { | |
| %791 = "torch.aten.view"(%789, %576) : (!torch.vtensor<[1,128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x86e9bf0) { | |
| %792 = "torch.aten.mm"(%791, %790) : (!torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86e9d00) { | |
| %793 = "torch.aten.mul.Scalar"(%259, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86e9e10) { | |
| %794 = "torch.aten.add.Tensor"(%793, %792, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86e9f40) { | |
| %795 = "torch.aten.view"(%794, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86ea050) { | |
| %796 = "torch.aten.add.Tensor"(%756, %795, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86ea180) { | |
| %797 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86ea270) { | |
| %798 = "torch.aten.sum.dim_IntList"(%796, %797, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86ea3c0) { | |
| %799 = "torch.aten.div.Scalar"(%798, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86ea4d0) { | |
| %800 = "torch.aten.size.int"(%796, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86ea5e0) { | |
| %801 = "torch.prim.ListConstruct"(%800, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86ea710) { | |
| %802 = "torch.aten.broadcast_to"(%799, %801) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x86ea820) { | |
| %803 = "torch.aten.sub.Tensor"(%796, %802, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86ea950) { | |
| %804 = "torch.aten.mul.Tensor"(%803, %803) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86eaa60) { | |
| %805 = "torch.aten.sum.dim_IntList"(%804, %797, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x86eabb0) { | |
| %806 = "torch.aten.div.Scalar"(%805, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x86eacc0) { | |
| %807 = "torch.aten.add.Scalar"(%806, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x86eadf0) { | |
| %808 = "torch.aten.rsqrt"(%807) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x86eaee0) { | |
| %809 = "torch.aten.size.int"(%796, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86eaff0) { | |
| %810 = "torch.prim.ListConstruct"(%809, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x86eb120) { | |
| %811 = "torch.aten.broadcast_to"(%808, %810) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86eb230) { | |
| %812 = "torch.aten.mul.Tensor"(%803, %811) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86eb340) { | |
| %813 = "torch.aten.mul.Tensor"(%812, %258) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86f6000) { | |
| %814 = "torch.aten.add.Tensor"(%813, %257, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86f6130) { | |
| %815 = "torch.aten.transpose.int"(%256, %306, %2) : (!torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f6260) { | |
| %816 = "torch.aten.view"(%814, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x86f6370) { | |
| %817 = "torch.aten.mm"(%816, %815) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32>) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86f6480) { | |
| %818 = "torch.aten.mul.Scalar"(%255, %2) : (!torch.vtensor<[3072],f32>, !torch.int) -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86f6590) { | |
| %819 = "torch.aten.add.Tensor"(%818, %817, %2) : (!torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f66c0) { | |
| %820 = "torch.aten.view"(%819, %487) : (!torch.vtensor<[128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86e2860) { | |
| %821 = "torch.aten.view"(%820, %489) : (!torch.vtensor<[1,128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,192],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86e2a20) { | |
| %822 = "torch.aten.slice.Tensor"(%821, %303, %306, %315, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86e2b90) { | |
| %823 = "torch.aten.slice.Tensor"(%821, %303, %315, %1, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x86e2d00) { | |
| %824 = "torch.aten.slice.Tensor"(%821, %303, %1, %314, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86e2e70) { | |
| %825 = "torch.aten.transpose.int"(%822, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86e2fa0) { | |
| %826 = "torch.aten.view"(%825, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x86ed470) { | |
| %827 = "torch.aten.permute"(%823, %497) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f77c0) { | |
| %828 = "torch.aten.view"(%827, %499) : (!torch.vtensor<[1,16,64,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x86f78d0) { | |
| %829 = "torch.aten.bmm"(%826, %828) : (!torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86f79e0) { | |
| %830 = "torch.aten.mul.Scalar"(%829, %322) : (!torch.vtensor<[16,128,128],f32>, !torch.float) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86f7af0) { | |
| %831 = "torch.aten.mul.Scalar"(%436, %323) : (!torch.vtensor<[16,1,128],f32>, !torch.float) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86f7ba0) { | |
| %832 = "torch.aten.add.Tensor"(%830, %831, %2) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86f7cb0) { | |
| %833 = "torch.aten.view"(%832, %505) : (!torch.vtensor<[16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x86f7dc0) { | |
| %834 = "torch.aten.mul.Scalar"(%833, %311) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x86f7ed0) { | |
| %835 = "torch.aten.add.Tensor"(%834, %462, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x86ee5d0) { | |
| %836 = "torch.aten.clone"(%291, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.maximum'(0x86ee6e0) { | |
| %837 = "torch.aten.maximum"(%835, %836) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.max.dim'(0x85a3030) { | |
| %838:2 = "torch.aten.max.dim"(%837, %303, %3) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool) -> (!torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>) | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x86ee8b0) { | |
| %839 = "torch.aten.sub.Tensor"(%837, %838#0, %312) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.exp'(0x86ee9e0) { | |
| %840 = "torch.aten.exp"(%839) : (!torch.vtensor<[1,16,128,128],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86eead0) { | |
| %841 = "torch.prim.ListConstruct"(%303) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x86e2930) { | |
| %842 = "torch.aten.sum.dim_IntList"(%840, %841, %3, %300) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[1,16,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Tensor'(0x86eec20) { | |
| %843 = "torch.aten.div.Tensor"(%840, %842) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x870f0f0) { | |
| %844 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x86ff2f0) { | |
| %845 = "torch.aten.empty.memory_format"(%844, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x86ff440) { | |
| %846 = "torch.valsem.aten.copy"(%845, %462, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bitwise_not'(0x86ff570) { | |
| %847 = "torch.aten.bitwise_not"(%846) : (!torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x86ff660) { | |
| %848 = "torch.aten.mul.Tensor"(%843, %847) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86ff770) { | |
| %849 = "torch.aten.view"(%848, %522) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x86ff880) { | |
| %850 = "torch.aten.transpose.int"(%824, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86ff9b0) { | |
| %851 = "torch.aten.view"(%850, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x86ffac0) { | |
| %852 = "torch.aten.bmm"(%849, %851) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86ffbd0) { | |
| %853 = "torch.aten.view"(%852, %527) : (!torch.vtensor<[16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x86ffce0) { | |
| %854 = "torch.aten.permute"(%853, %529) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x86ffdf0) { | |
| %855 = "torch.aten.clone"(%854, %306) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x86fff00) { | |
| %856 = "torch.aten.view"(%855, %532) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8700010) { | |
| %857 = "torch.aten.transpose.int"(%254, %306, %2) : (!torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8700140) { | |
| %858 = "torch.aten.view"(%856, %482) : (!torch.vtensor<[1,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8700250) { | |
| %859 = "torch.aten.mm"(%858, %857) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8700360) { | |
| %860 = "torch.aten.mul.Scalar"(%253, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8700470) { | |
| %861 = "torch.aten.add.Tensor"(%860, %859, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87005a0) { | |
| %862 = "torch.aten.view"(%861, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x87006b0) { | |
| %863 = "torch.aten.add.Tensor"(%796, %862, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87007e0) { | |
| %864 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x87008d0) { | |
| %865 = "torch.aten.sum.dim_IntList"(%863, %864, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8700a20) { | |
| %866 = "torch.aten.div.Scalar"(%865, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8700b30) { | |
| %867 = "torch.aten.size.int"(%863, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8700c40) { | |
| %868 = "torch.prim.ListConstruct"(%867, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8700d70) { | |
| %869 = "torch.aten.broadcast_to"(%866, %868) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8700e80) { | |
| %870 = "torch.aten.sub.Tensor"(%863, %869, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8700fb0) { | |
| %871 = "torch.aten.mul.Tensor"(%870, %870) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x87010c0) { | |
| %872 = "torch.aten.sum.dim_IntList"(%871, %864, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8701210) { | |
| %873 = "torch.aten.div.Scalar"(%872, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8701320) { | |
| %874 = "torch.aten.add.Scalar"(%873, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x8701450) { | |
| %875 = "torch.aten.rsqrt"(%874) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8701540) { | |
| %876 = "torch.aten.size.int"(%863, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8701650) { | |
| %877 = "torch.prim.ListConstruct"(%876, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8701780) { | |
| %878 = "torch.aten.broadcast_to"(%875, %877) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8701890) { | |
| %879 = "torch.aten.mul.Tensor"(%870, %878) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87019a0) { | |
| %880 = "torch.aten.mul.Tensor"(%879, %252) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8701ab0) { | |
| %881 = "torch.aten.add.Tensor"(%880, %251, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8701be0) { | |
| %882 = "torch.aten.transpose.int"(%250, %306, %2) : (!torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8701d10) { | |
| %883 = "torch.aten.view"(%881, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8701e20) { | |
| %884 = "torch.aten.mm"(%883, %882) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8701f30) { | |
| %885 = "torch.aten.mul.Scalar"(%249, %2) : (!torch.vtensor<[4096],f32>, !torch.int) -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8702040) { | |
| %886 = "torch.aten.add.Tensor"(%885, %884, %2) : (!torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8726c50) { | |
| %887 = "torch.aten.view"(%886, %564) : (!torch.vtensor<[128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8726d00) { | |
| %888 = "torch.aten.mul.Scalar"(%887, %318) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8726dd0) { | |
| %889 = "torch.aten.mul.Scalar"(%887, %319) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8726ee0) { | |
| %890 = "torch.aten.mul.Scalar"(%887, %320) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8726ff0) { | |
| %891 = "torch.aten.mul.Tensor"(%890, %887) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8727100) { | |
| %892 = "torch.aten.add.Scalar"(%891, %2, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8727230) { | |
| %893 = "torch.aten.mul.Tensor"(%889, %892) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.tanh'(0x8727340) { | |
| %894 = "torch.aten.tanh"(%893) : (!torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8727430) { | |
| %895 = "torch.aten.add.Scalar"(%894, %312, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8727560) { | |
| %896 = "torch.aten.mul.Tensor"(%888, %895) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8727670) { | |
| %897 = "torch.aten.transpose.int"(%248, %306, %2) : (!torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87277a0) { | |
| %898 = "torch.aten.view"(%896, %576) : (!torch.vtensor<[1,128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x87278b0) { | |
| %899 = "torch.aten.mm"(%898, %897) : (!torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x87279c0) { | |
| %900 = "torch.aten.mul.Scalar"(%247, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8727ad0) { | |
| %901 = "torch.aten.add.Tensor"(%900, %899, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8727c00) { | |
| %902 = "torch.aten.view"(%901, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8727d10) { | |
| %903 = "torch.aten.add.Tensor"(%863, %902, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8727e40) { | |
| %904 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8727f30) { | |
| %905 = "torch.aten.sum.dim_IntList"(%903, %904, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8728080) { | |
| %906 = "torch.aten.div.Scalar"(%905, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8728190) { | |
| %907 = "torch.aten.size.int"(%903, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87282a0) { | |
| %908 = "torch.prim.ListConstruct"(%907, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x87283d0) { | |
| %909 = "torch.aten.broadcast_to"(%906, %908) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x87284e0) { | |
| %910 = "torch.aten.sub.Tensor"(%903, %909, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8728610) { | |
| %911 = "torch.aten.mul.Tensor"(%910, %910) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8728720) { | |
| %912 = "torch.aten.sum.dim_IntList"(%911, %904, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8728870) { | |
| %913 = "torch.aten.div.Scalar"(%912, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8728980) { | |
| %914 = "torch.aten.add.Scalar"(%913, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x8728ab0) { | |
| %915 = "torch.aten.rsqrt"(%914) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8728ba0) { | |
| %916 = "torch.aten.size.int"(%903, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8728cb0) { | |
| %917 = "torch.prim.ListConstruct"(%916, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8728de0) { | |
| %918 = "torch.aten.broadcast_to"(%915, %917) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8728ef0) { | |
| %919 = "torch.aten.mul.Tensor"(%910, %918) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8729000) { | |
| %920 = "torch.aten.mul.Tensor"(%919, %246) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8729110) { | |
| %921 = "torch.aten.add.Tensor"(%920, %245, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8729240) { | |
| %922 = "torch.aten.transpose.int"(%244, %306, %2) : (!torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8729370) { | |
| %923 = "torch.aten.view"(%921, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8729480) { | |
| %924 = "torch.aten.mm"(%923, %922) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32>) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8729590) { | |
| %925 = "torch.aten.mul.Scalar"(%243, %2) : (!torch.vtensor<[3072],f32>, !torch.int) -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x87296a0) { | |
| %926 = "torch.aten.add.Tensor"(%925, %924, %2) : (!torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87297d0) { | |
| %927 = "torch.aten.view"(%926, %487) : (!torch.vtensor<[128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87298e0) { | |
| %928 = "torch.aten.view"(%927, %489) : (!torch.vtensor<[1,128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,192],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8729ae0) { | |
| %929 = "torch.aten.slice.Tensor"(%928, %303, %306, %315, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8729c50) { | |
| %930 = "torch.aten.slice.Tensor"(%928, %303, %315, %1, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8729dc0) { | |
| %931 = "torch.aten.slice.Tensor"(%928, %303, %1, %314, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8729f30) { | |
| %932 = "torch.aten.transpose.int"(%929, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872a060) { | |
| %933 = "torch.aten.view"(%932, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x872a170) { | |
| %934 = "torch.aten.permute"(%930, %497) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872a280) { | |
| %935 = "torch.aten.view"(%934, %499) : (!torch.vtensor<[1,16,64,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x872a390) { | |
| %936 = "torch.aten.bmm"(%933, %935) : (!torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x872a4a0) { | |
| %937 = "torch.aten.mul.Scalar"(%936, %324) : (!torch.vtensor<[16,128,128],f32>, !torch.float) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x872a5b0) { | |
| %938 = "torch.aten.mul.Scalar"(%436, %325) : (!torch.vtensor<[16,1,128],f32>, !torch.float) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x872a6c0) { | |
| %939 = "torch.aten.add.Tensor"(%937, %938, %2) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872a7f0) { | |
| %940 = "torch.aten.view"(%939, %505) : (!torch.vtensor<[16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x872a900) { | |
| %941 = "torch.aten.mul.Scalar"(%940, %326) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x872aa10) { | |
| %942 = "torch.aten.add.Tensor"(%941, %462, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x872ab40) { | |
| %943 = "torch.aten.clone"(%291, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.maximum'(0x872ac50) { | |
| %944 = "torch.aten.maximum"(%942, %943) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.max.dim'(0x85a25a0) { | |
| %945:2 = "torch.aten.max.dim"(%944, %303, %3) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool) -> (!torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>) | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x872ae20) { | |
| %946 = "torch.aten.sub.Tensor"(%944, %945#0, %312) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.exp'(0x872af50) { | |
| %947 = "torch.aten.exp"(%946) : (!torch.vtensor<[1,16,128,128],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x872b040) { | |
| %948 = "torch.prim.ListConstruct"(%303) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x87299f0) { | |
| %949 = "torch.aten.sum.dim_IntList"(%947, %948, %3, %300) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[1,16,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Tensor'(0x872b190) { | |
| %950 = "torch.aten.div.Tensor"(%947, %949) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x86ff200) { | |
| %951 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x872b3f0) { | |
| %952 = "torch.aten.empty.memory_format"(%951, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x872b580) { | |
| %953 = "torch.valsem.aten.copy"(%952, %462, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bitwise_not'(0x872b6b0) { | |
| %954 = "torch.aten.bitwise_not"(%953) : (!torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x872b7a0) { | |
| %955 = "torch.aten.mul.Tensor"(%950, %954) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872b8b0) { | |
| %956 = "torch.aten.view"(%955, %522) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x872b9c0) { | |
| %957 = "torch.aten.transpose.int"(%931, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872baf0) { | |
| %958 = "torch.aten.view"(%957, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x872bc00) { | |
| %959 = "torch.aten.bmm"(%956, %958) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872bd10) { | |
| %960 = "torch.aten.view"(%959, %527) : (!torch.vtensor<[16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x872be20) { | |
| %961 = "torch.aten.permute"(%960, %529) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x872bf30) { | |
| %962 = "torch.aten.clone"(%961, %306) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872c040) { | |
| %963 = "torch.aten.view"(%962, %532) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
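Ops %956 through %963 compute the attention context, probs @ V, and merge the 16 heads of width 64 back into the 1024-wide hidden dimension. A sketch of the same view/transpose/bmm/permute/clone/view sequence, shapes taken from the log:

import torch

probs = torch.rand(1, 16, 128, 128)   # %955: masked probabilities
value = torch.rand(1, 128, 16, 64)    # %931: [batch, seq, heads, head_dim]

p = probs.view(16, 128, 128)                    # aten.view %956
v = value.transpose(1, 2).reshape(16, 128, 64)  # aten.transpose.int %957 + view %958
ctx = torch.bmm(p, v)                           # aten.bmm %959 -> [16, 128, 64]
ctx = ctx.view(1, 16, 128, 64)                  # aten.view %960
ctx = ctx.permute(0, 2, 1, 3).contiguous()      # aten.permute %961 + clone %962
ctx = ctx.view(1, 128, 1024)                    # aten.view %963: merge 16*64 heads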
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x872c150) { | |
| %964 = "torch.aten.transpose.int"(%242, %306, %2) : (!torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872c280) { | |
| %965 = "torch.aten.view"(%963, %482) : (!torch.vtensor<[1,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x872c390) { | |
| %966 = "torch.aten.mm"(%965, %964) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x872c4a0) { | |
| %967 = "torch.aten.mul.Scalar"(%241, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x872c5b0) { | |
| %968 = "torch.aten.add.Tensor"(%967, %966, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872c6e0) { | |
| %969 = "torch.aten.view"(%968, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x872c7f0) { | |
| %970 = "torch.aten.add.Tensor"(%903, %969, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
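Ops %964 through %970 look like the attention output projection plus the residual connection, with aten.addmm expanded into transpose + mm + a scaled bias add (the aten.mul.Scalar of the bias by %2 is consistent with a beta factor of 1). A sketch under that assumption:

import torch

x = torch.rand(1, 128, 1024)        # %963: merged attention context
W = torch.rand(1024, 1024)          # %242: dense weight
b = torch.rand(1024)                # %241: dense bias
hidden = torch.rand(2, 128, 1024)   # %903: residual input ([?,128,1024] in the log)

y = x.view(128, 1024) @ W.t()       # aten.view %965 + transpose.int %964 + mm %966
y = (b * 1) + y                     # aten.mul.Scalar %967 + add.Tensor %968
y = y.view(1, 128, 1024)            # aten.view %969
out = hidden + y                    # residual aten.add.Tensor %970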
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x872c920) { | |
| %971 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x872ca10) { | |
| %972 = "torch.aten.sum.dim_IntList"(%970, %971, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x872cb60) { | |
| %973 = "torch.aten.div.Scalar"(%972, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x872cc70) { | |
| %974 = "torch.aten.size.int"(%970, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x872cd80) { | |
| %975 = "torch.prim.ListConstruct"(%974, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x872ceb0) { | |
| %976 = "torch.aten.broadcast_to"(%973, %975) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x872cfc0) { | |
| %977 = "torch.aten.sub.Tensor"(%970, %976, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x872d0f0) { | |
| %978 = "torch.aten.mul.Tensor"(%977, %977) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x872d200) { | |
| %979 = "torch.aten.sum.dim_IntList"(%978, %971, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x872d350) { | |
| %980 = "torch.aten.div.Scalar"(%979, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x872d460) { | |
| %981 = "torch.aten.add.Scalar"(%980, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x872d590) { | |
| %982 = "torch.aten.rsqrt"(%981) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x872d680) { | |
| %983 = "torch.aten.size.int"(%970, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87060b0) { | |
| %984 = "torch.prim.ListConstruct"(%983, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x87061e0) { | |
| %985 = "torch.aten.broadcast_to"(%982, %984) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87062f0) { | |
| %986 = "torch.aten.mul.Tensor"(%977, %985) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8706400) { | |
| %987 = "torch.aten.mul.Tensor"(%986, %240) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8706510) { | |
| %988 = "torch.aten.add.Tensor"(%987, %239, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
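The sum/div/sub/mul/rsqrt/broadcast chain from %971 through %988 is a decomposed LayerNorm over the last dimension (1024), including the affine weight %240 and bias %239. Equivalent PyTorch, with the epsilon value standing in for %305 (its constant is not printed in this excerpt):

import torch

x = torch.rand(2, 128, 1024)   # %970: leading dim is dynamic ("?") in the log
weight = torch.rand(1024)      # %240
bias = torch.rand(1024)        # %239
eps = 1e-5                     # hypothetical stand-in for %305

mean = x.sum(dim=2, keepdim=True) / 1024            # sum.dim_IntList %972 + div.Scalar %973
centered = x - mean.expand(x.shape)                 # broadcast_to %976 + sub.Tensor %977
var = (centered * centered).sum(2, keepdim=True) / 1024   # %978-%980
inv_std = torch.rsqrt(var + eps)                    # add.Scalar %981 + rsqrt %982
y = centered * inv_std.expand(x.shape)              # broadcast_to %985 + mul.Tensor %986
y = y * weight + bias                               # affine: mul %987 + add %988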
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8706640) { | |
| %989 = "torch.aten.transpose.int"(%238, %306, %2) : (!torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8706770) { | |
| %990 = "torch.aten.view"(%988, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x872e760) { | |
| %991 = "torch.aten.mm"(%990, %989) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x872e870) { | |
| %992 = "torch.aten.mul.Scalar"(%237, %2) : (!torch.vtensor<[4096],f32>, !torch.int) -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x872e980) { | |
| %993 = "torch.aten.add.Tensor"(%992, %991, %2) : (!torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872eab0) { | |
| %994 = "torch.aten.view"(%993, %564) : (!torch.vtensor<[128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x872ebc0) { | |
| %995 = "torch.aten.mul.Scalar"(%994, %318) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x872ecd0) { | |
| %996 = "torch.aten.mul.Scalar"(%994, %319) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x872ede0) { | |
| %997 = "torch.aten.mul.Scalar"(%994, %320) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x872eef0) { | |
| %998 = "torch.aten.mul.Tensor"(%997, %994) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x872f000) { | |
| %999 = "torch.aten.add.Scalar"(%998, %2, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x872f130) { | |
| %1000 = "torch.aten.mul.Tensor"(%996, %999) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.tanh'(0x872f240) { | |
| %1001 = "torch.aten.tanh"(%1000) : (!torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x872f330) { | |
| %1002 = "torch.aten.add.Scalar"(%1001, %312, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x872f460) { | |
| %1003 = "torch.aten.mul.Tensor"(%995, %1002) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
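The mul.Scalar/tanh chain from %995 through %1003 matches the tanh GELU used in BLOOM's MLP, 0.5*x*(1 + tanh(0.79788456*x*(1 + 0.044715*x*x))). The scalar operands %318/%319/%320/%312 are not printed here, so the constants below are assumptions based on that formula:

import torch

x = torch.rand(1, 128, 4096)            # %994: output of the up-projection

half_x = x * 0.5                        # mul.Scalar %995 (assumed %318 = 0.5)
kx = x * 0.79788456                     # mul.Scalar %996 (assumed %319 = sqrt(2/pi))
cx = x * 0.044715                       # mul.Scalar %997 (assumed %320)
inner = kx * (cx * x + 1)               # mul %998, add.Scalar %999, mul %1000
y = half_x * (torch.tanh(inner) + 1)    # tanh %1001, add.Scalar %1002, mul %1003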
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x872f570) { | |
| %1004 = "torch.aten.transpose.int"(%236, %306, %2) : (!torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872f6a0) { | |
| %1005 = "torch.aten.view"(%1003, %576) : (!torch.vtensor<[1,128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x872f7b0) { | |
| %1006 = "torch.aten.mm"(%1005, %1004) : (!torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x872f8c0) { | |
| %1007 = "torch.aten.mul.Scalar"(%235, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x872f9d0) { | |
| %1008 = "torch.aten.add.Tensor"(%1007, %1006, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x872fb00) { | |
| %1009 = "torch.aten.view"(%1008, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x872fc10) { | |
| %1010 = "torch.aten.add.Tensor"(%970, %1009, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x872fd40) { | |
| %1011 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x872fe30) { | |
| %1012 = "torch.aten.sum.dim_IntList"(%1010, %1011, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x872ff80) { | |
| %1013 = "torch.aten.div.Scalar"(%1012, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8730090) { | |
| %1014 = "torch.aten.size.int"(%1010, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87301a0) { | |
| %1015 = "torch.prim.ListConstruct"(%1014, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x87302d0) { | |
| %1016 = "torch.aten.broadcast_to"(%1013, %1015) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x87303e0) { | |
| %1017 = "torch.aten.sub.Tensor"(%1010, %1016, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8730510) { | |
| %1018 = "torch.aten.mul.Tensor"(%1017, %1017) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8708be0) { | |
| %1019 = "torch.aten.sum.dim_IntList"(%1018, %1011, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8708d30) { | |
| %1020 = "torch.aten.div.Scalar"(%1019, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8708e40) { | |
| %1021 = "torch.aten.add.Scalar"(%1020, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x8708f70) { | |
| %1022 = "torch.aten.rsqrt"(%1021) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8709060) { | |
| %1023 = "torch.aten.size.int"(%1010, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8709170) { | |
| %1024 = "torch.prim.ListConstruct"(%1023, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x87092a0) { | |
| %1025 = "torch.aten.broadcast_to"(%1022, %1024) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87315f0) { | |
| %1026 = "torch.aten.mul.Tensor"(%1017, %1025) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8731700) { | |
| %1027 = "torch.aten.mul.Tensor"(%1026, %234) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8731810) { | |
| %1028 = "torch.aten.add.Tensor"(%1027, %233, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x870bf60) { | |
| %1029 = "torch.aten.transpose.int"(%232, %306, %2) : (!torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870c090) { | |
| %1030 = "torch.aten.view"(%1028, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x870c1a0) { | |
| %1031 = "torch.aten.mm"(%1030, %1029) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32>) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x870c2b0) { | |
| %1032 = "torch.aten.mul.Scalar"(%231, %2) : (!torch.vtensor<[3072],f32>, !torch.int) -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870c3c0) { | |
| %1033 = "torch.aten.add.Tensor"(%1032, %1031, %2) : (!torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870c4f0) { | |
| %1034 = "torch.aten.view"(%1033, %487) : (!torch.vtensor<[128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870c600) { | |
| %1035 = "torch.aten.view"(%1034, %489) : (!torch.vtensor<[1,128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,192],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8710500) { | |
| %1036 = "torch.aten.slice.Tensor"(%1035, %303, %306, %315, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8710670) { | |
| %1037 = "torch.aten.slice.Tensor"(%1035, %303, %315, %1, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x87107e0) { | |
| %1038 = "torch.aten.slice.Tensor"(%1035, %303, %1, %314, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
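Ops %1034 through %1038 take the fused QKV projection of shape [1,128,3072], reshape it to [1, 128, 16 heads, 3*64], and slice the last dimension into Q, K, and V (BLOOM packs q/k/v per head). The slice bounds below are inferred from the result shapes, since %315/%1/%314 are symbolic in the log:

import torch

qkv = torch.rand(1, 128, 3072)     # %1034
qkv = qkv.view(1, 128, 16, 192)    # aten.view %1035
q = qkv[..., 0:64]                 # aten.slice.Tensor %1036
k = qkv[..., 64:128]               # aten.slice.Tensor %1037
v = qkv[..., 128:192]              # aten.slice.Tensor %1038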
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8710950) { | |
| %1039 = "torch.aten.transpose.int"(%1036, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8710a80) { | |
| %1040 = "torch.aten.view"(%1039, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x8733900) { | |
| %1041 = "torch.aten.permute"(%1037, %497) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87339b0) { | |
| %1042 = "torch.aten.view"(%1041, %499) : (!torch.vtensor<[1,16,64,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x8733aa0) { | |
| %1043 = "torch.aten.bmm"(%1040, %1042) : (!torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8733bb0) { | |
| %1044 = "torch.aten.mul.Scalar"(%1043, %327) : (!torch.vtensor<[16,128,128],f32>, !torch.float) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8733cc0) { | |
| %1045 = "torch.aten.mul.Scalar"(%436, %328) : (!torch.vtensor<[16,1,128],f32>, !torch.float) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8733dd0) { | |
| %1046 = "torch.aten.add.Tensor"(%1044, %1045, %2) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
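Ops %1043 through %1046 look like a decomposed baddbmm: alpha*(Q @ K^T) plus beta*bias, where the [16,1,128] bias %436 broadcasting over the query dimension has the shape of BLOOM's ALiBi tensor. The alpha/beta scalars (%327, %328) are not printed, so 1/sqrt(64) and 1.0 below are assumptions:

import torch

q = torch.rand(16, 128, 64)     # %1040
kT = torch.rand(16, 64, 128)    # %1042: K permuted to [heads, head_dim, seq]
alibi = torch.rand(16, 1, 128)  # %436

scores = torch.bmm(q, kT) * (1 / 64 ** 0.5)  # aten.bmm %1043 + mul.Scalar %1044
scores = scores + alibi * 1.0                # mul.Scalar %1045 + add.Tensor %1046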
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8733f00) { | |
| %1047 = "torch.aten.view"(%1046, %505) : (!torch.vtensor<[16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8734010) { | |
| %1048 = "torch.aten.mul.Scalar"(%1047, %329) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8734120) { | |
| %1049 = "torch.aten.add.Tensor"(%1048, %462, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x8734250) { | |
| %1050 = "torch.aten.clone"(%291, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.maximum'(0x8734360) { | |
| %1051 = "torch.aten.maximum"(%1049, %1050) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.max.dim'(0x85a28f0) { | |
| %1052:2 = "torch.aten.max.dim"(%1051, %303, %3) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool) -> (!torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>) | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8734530) { | |
| %1053 = "torch.aten.sub.Tensor"(%1051, %1052#0, %312) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.exp'(0x8734660) { | |
| %1054 = "torch.aten.exp"(%1053) : (!torch.vtensor<[1,16,128,128],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8734750) { | |
| %1055 = "torch.prim.ListConstruct"(%303) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8710410) { | |
| %1056 = "torch.aten.sum.dim_IntList"(%1054, %1055, %3, %300) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[1,16,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Tensor'(0x87348a0) { | |
| %1057 = "torch.aten.div.Tensor"(%1054, %1056) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
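Ops %1050 through %1057 are a floor clamp (aten.maximum against the scalar tensor %291) followed by torch-mlir's numerically stable softmax decomposition, which subtracts the per-row max before exponentiating. A sketch, with the floor value assumed to be the f32 minimum since %291 is not printed:

import torch

scores = torch.rand(1, 16, 128, 128)                   # %1049
floor = torch.tensor(torch.finfo(torch.float32).min)   # %291 (value assumed)

s = torch.maximum(scores, floor)              # aten.maximum %1051
m = s.max(dim=-1, keepdim=True).values        # aten.max.dim %1052
probs = torch.exp(s - m)                      # sub.Tensor %1053 + exp %1054
probs = probs / probs.sum(-1, keepdim=True)   # sum.dim_IntList %1056 + div.Tensor %1057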
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x872b300) { | |
| %1058 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x8734b00) { | |
| %1059 = "torch.aten.empty.memory_format"(%1058, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x8734c90) { | |
| %1060 = "torch.valsem.aten.copy"(%1059, %462, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bitwise_not'(0x8734dc0) { | |
| %1061 = "torch.aten.bitwise_not"(%1060) : (!torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8734eb0) { | |
| %1062 = "torch.aten.mul.Tensor"(%1057, %1061) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8734fc0) { | |
| %1063 = "torch.aten.view"(%1062, %522) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x87350d0) { | |
| %1064 = "torch.aten.transpose.int"(%1038, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8735200) { | |
| %1065 = "torch.aten.view"(%1064, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x8735310) { | |
| %1066 = "torch.aten.bmm"(%1063, %1065) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8735420) { | |
| %1067 = "torch.aten.view"(%1066, %527) : (!torch.vtensor<[16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x8735530) { | |
| %1068 = "torch.aten.permute"(%1067, %529) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x8735640) { | |
| %1069 = "torch.aten.clone"(%1068, %306) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8735750) { | |
| %1070 = "torch.aten.view"(%1069, %532) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8735860) { | |
| %1071 = "torch.aten.transpose.int"(%230, %306, %2) : (!torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8735990) { | |
| %1072 = "torch.aten.view"(%1070, %482) : (!torch.vtensor<[1,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8735aa0) { | |
| %1073 = "torch.aten.mm"(%1072, %1071) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8735bb0) { | |
| %1074 = "torch.aten.mul.Scalar"(%229, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8735cc0) { | |
| %1075 = "torch.aten.add.Tensor"(%1074, %1073, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8735df0) { | |
| %1076 = "torch.aten.view"(%1075, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8735f00) { | |
| %1077 = "torch.aten.add.Tensor"(%1010, %1076, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8736030) { | |
| %1078 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8736120) { | |
| %1079 = "torch.aten.sum.dim_IntList"(%1077, %1078, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8736270) { | |
| %1080 = "torch.aten.div.Scalar"(%1079, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8736380) { | |
| %1081 = "torch.aten.size.int"(%1077, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8736490) { | |
| %1082 = "torch.prim.ListConstruct"(%1081, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x87365c0) { | |
| %1083 = "torch.aten.broadcast_to"(%1080, %1082) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x87366d0) { | |
| %1084 = "torch.aten.sub.Tensor"(%1077, %1083, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8736800) { | |
| %1085 = "torch.aten.mul.Tensor"(%1084, %1084) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8736910) { | |
| %1086 = "torch.aten.sum.dim_IntList"(%1085, %1078, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8736a60) { | |
| %1087 = "torch.aten.div.Scalar"(%1086, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8736b70) { | |
| %1088 = "torch.aten.add.Scalar"(%1087, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x8736ca0) { | |
| %1089 = "torch.aten.rsqrt"(%1088) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8736d90) { | |
| %1090 = "torch.aten.size.int"(%1077, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8736ea0) { | |
| %1091 = "torch.prim.ListConstruct"(%1090, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8736fd0) { | |
| %1092 = "torch.aten.broadcast_to"(%1089, %1091) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87370e0) { | |
| %1093 = "torch.aten.mul.Tensor"(%1084, %1092) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87371f0) { | |
| %1094 = "torch.aten.mul.Tensor"(%1093, %228) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8737300) { | |
| %1095 = "torch.aten.add.Tensor"(%1094, %227, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8737430) { | |
| %1096 = "torch.aten.transpose.int"(%226, %306, %2) : (!torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8737560) { | |
| %1097 = "torch.aten.view"(%1095, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8737670) { | |
| %1098 = "torch.aten.mm"(%1097, %1096) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8737780) { | |
| %1099 = "torch.aten.mul.Scalar"(%225, %2) : (!torch.vtensor<[4096],f32>, !torch.int) -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8737890) { | |
| %1100 = "torch.aten.add.Tensor"(%1099, %1098, %2) : (!torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87379c0) { | |
| %1101 = "torch.aten.view"(%1100, %564) : (!torch.vtensor<[128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8737ad0) { | |
| %1102 = "torch.aten.mul.Scalar"(%1101, %318) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8737be0) { | |
| %1103 = "torch.aten.mul.Scalar"(%1101, %319) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8737cf0) { | |
| %1104 = "torch.aten.mul.Scalar"(%1101, %320) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8737e00) { | |
| %1105 = "torch.aten.mul.Tensor"(%1104, %1101) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8737f10) { | |
| %1106 = "torch.aten.add.Scalar"(%1105, %2, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8738040) { | |
| %1107 = "torch.aten.mul.Tensor"(%1103, %1106) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.tanh'(0x8738150) { | |
| %1108 = "torch.aten.tanh"(%1107) : (!torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8738240) { | |
| %1109 = "torch.aten.add.Scalar"(%1108, %312, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8738370) { | |
| %1110 = "torch.aten.mul.Tensor"(%1102, %1109) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
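
Annotation: %1102 through %1110 are a decomposed tanh-GELU from BLOOM's MLP. Reading the chain back: x*c0, x*c1, (x*c2)*x, +1, multiply, tanh, +1, multiply. The scalar constants %318, %319, %320, %312 are defined outside this excerpt; the shapes and op order match BLOOM's bloom_gelu_forward with c0 = 0.5, c1 = sqrt(2/pi) ~= 0.79788456, c2 = 0.044715, so the sketch below assumes those values:

import torch

def decomposed_gelu(x):
    half_x   = x * 0.5                         # %1102 = mul.Scalar(x, %318)
    scaled_x = x * 0.79788456                  # %1103 = mul.Scalar(x, %319)
    cubic    = (x * 0.044715) * x              # %1104, %1105
    inner    = scaled_x * (cubic + 1.0)        # %1106 add.Scalar, %1107 mul.Tensor
    return half_x * (torch.tanh(inner) + 1.0)  # %1108 tanh, %1109 add, %1110 mul

x = torch.randn(1, 128, 4096)
ref = 0.5 * x * (1.0 + torch.tanh(0.79788456 * x * (1.0 + 0.044715 * x * x)))
assert torch.allclose(decomposed_gelu(x), ref)
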
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8738480) { | |
| %1111 = "torch.aten.transpose.int"(%224, %306, %2) : (!torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87385b0) { | |
| %1112 = "torch.aten.view"(%1110, %576) : (!torch.vtensor<[1,128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x87386c0) { | |
| %1113 = "torch.aten.mm"(%1112, %1111) : (!torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x870a570) { | |
| %1114 = "torch.aten.mul.Scalar"(%223, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870a680) { | |
| %1115 = "torch.aten.add.Tensor"(%1114, %1113, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870a7b0) { | |
| %1116 = "torch.aten.view"(%1115, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870a8c0) { | |
| %1117 = "torch.aten.add.Tensor"(%1077, %1116, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
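
Annotation: %1111 through %1117 are the MLP down-projection (4096 -> 1024, weight %224, bias %223) followed by the residual add, where the [1,128,1024] projection output broadcasts against the dynamically-batched [?,128,1024] hidden state %1077. A sketch with the dynamic dim pinned to 1:

import torch

hidden  = torch.randn(1, 128, 1024)            # %1077, the ? dim assumed to be 1
mlp_out = torch.randn(1, 128, 4096)            # %1110, the GELU output
w_down  = torch.randn(1024, 4096)              # %224, stored [out, in]
b_down  = torch.randn(1024)                    # %223

y = torch.mm(mlp_out.view(128, 4096), w_down.t())   # transpose.int + view + mm
y = (b_down * 1 + y).view(1, 128, 1024)             # mul.Scalar/add.Tensor/view
hidden = hidden + y                                  # add.Tensor %1117, broadcasts
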
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x870a9f0) { | |
| %1118 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x870aae0) { | |
| %1119 = "torch.aten.sum.dim_IntList"(%1117, %1118, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x870ac30) { | |
| %1120 = "torch.aten.div.Scalar"(%1119, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x87397a0) { | |
| %1121 = "torch.aten.size.int"(%1117, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87398b0) { | |
| %1122 = "torch.prim.ListConstruct"(%1121, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x87399e0) { | |
| %1123 = "torch.aten.broadcast_to"(%1120, %1122) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8739af0) { | |
| %1124 = "torch.aten.sub.Tensor"(%1117, %1123, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8739c20) { | |
| %1125 = "torch.aten.mul.Tensor"(%1124, %1124) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8739d30) { | |
| %1126 = "torch.aten.sum.dim_IntList"(%1125, %1118, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8739e80) { | |
| %1127 = "torch.aten.div.Scalar"(%1126, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8739f90) { | |
| %1128 = "torch.aten.add.Scalar"(%1127, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x873a0c0) { | |
| %1129 = "torch.aten.rsqrt"(%1128) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x873a1b0) { | |
| %1130 = "torch.aten.size.int"(%1117, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x873a2c0) { | |
| %1131 = "torch.prim.ListConstruct"(%1130, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x873a3f0) { | |
| %1132 = "torch.aten.broadcast_to"(%1129, %1131) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x873a500) { | |
| %1133 = "torch.aten.mul.Tensor"(%1124, %1132) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x873a610) { | |
| %1134 = "torch.aten.mul.Tensor"(%1133, %222) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x873a720) { | |
| %1135 = "torch.aten.add.Tensor"(%1134, %221, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
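
Annotation: %1118 through %1135 spell out aten.layer_norm over the last (1024-wide) dimension as primitives: mean via sum.dim_IntList and div.Scalar, biased variance of the centered values, rsqrt(var + eps), then the affine weight %222 and bias %221. Assuming %0 is the int 1024 and %305 the usual eps of 1e-5 (both defined upstream), the decomposition matches F.layer_norm:

import torch
import torch.nn.functional as F

def decomposed_layer_norm(x, weight, bias, eps=1e-5):
    n = x.shape[-1]
    mean = x.sum(-1, keepdim=True) / n              # sum.dim_IntList + div.Scalar
    centered = x - mean.expand_as(x)                # broadcast_to + sub.Tensor
    var = (centered * centered).sum(-1, keepdim=True) / n
    inv_std = torch.rsqrt(var + eps)                # add.Scalar + rsqrt
    return centered * inv_std.expand_as(x) * weight + bias

x = torch.randn(2, 128, 1024)
w, b = torch.randn(1024), torch.randn(1024)
assert torch.allclose(decomposed_layer_norm(x, w, b),
                      F.layer_norm(x, (1024,), w, b, eps=1e-5), atol=1e-5)
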
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x873a850) { | |
| %1136 = "torch.aten.transpose.int"(%220, %306, %2) : (!torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873a980) { | |
| %1137 = "torch.aten.view"(%1135, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x873aa90) { | |
| %1138 = "torch.aten.mm"(%1137, %1136) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32>) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x873aba0) { | |
| %1139 = "torch.aten.mul.Scalar"(%219, %2) : (!torch.vtensor<[3072],f32>, !torch.int) -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x873acb0) { | |
| %1140 = "torch.aten.add.Tensor"(%1139, %1138, %2) : (!torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873ade0) { | |
| %1141 = "torch.aten.view"(%1140, %487) : (!torch.vtensor<[128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873aef0) { | |
| %1142 = "torch.aten.view"(%1141, %489) : (!torch.vtensor<[1,128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,192],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x873b0f0) { | |
| %1143 = "torch.aten.slice.Tensor"(%1142, %303, %306, %315, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x873b260) { | |
| %1144 = "torch.aten.slice.Tensor"(%1142, %303, %315, %1, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x873b3d0) { | |
| %1145 = "torch.aten.slice.Tensor"(%1142, %303, %1, %314, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
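
Annotation: %1141 through %1145 split BLOOM's fused QKV projection. The [1,128,3072] projection output is viewed as [1,128,16,192], i.e. 16 heads times 3 x head_dim 64, and the three slice.Tensor ops cut the last dimension into query, key, and value. The slice bounds %306/%315/%1/%314 live outside this excerpt; the result shapes force them to 0/64/128/192:

import torch

fused = torch.randn(1, 128, 3072)        # %1141, output of the QKV projection
fused = fused.view(1, 128, 16, 192)      # %1142: 16 heads x (3 * head_dim 64)
q = fused[..., 0:64]                     # %1143 slice.Tensor
k = fused[..., 64:128]                   # %1144
v = fused[..., 128:192]                  # %1145
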
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x873b540) { | |
| %1146 = "torch.aten.transpose.int"(%1143, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873b670) { | |
| %1147 = "torch.aten.view"(%1146, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x873b780) { | |
| %1148 = "torch.aten.permute"(%1144, %497) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873b890) { | |
| %1149 = "torch.aten.view"(%1148, %499) : (!torch.vtensor<[1,16,64,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x873b9a0) { | |
| %1150 = "torch.aten.bmm"(%1147, %1149) : (!torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
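
Annotation: %1146 through %1150 fold the batch and head dims together and compute the raw attention scores as a batched matmul: the query becomes [16,128,64], the key is permuted so head_dim is the contraction axis, and bmm yields [16,128,128]:

import torch

q = torch.randn(1, 128, 16, 64)                  # %1143
k = torch.randn(1, 128, 16, 64)                  # %1144
q2 = q.transpose(1, 2).reshape(16, 128, 64)      # %1146 transpose.int, %1147 view
k2 = k.permute(0, 2, 3, 1).reshape(16, 64, 128)  # %1148 permute, %1149 view
scores = torch.bmm(q2, k2)                       # %1150: [16, 128, 128]
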
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x873bab0) { | |
| %1151 = "torch.aten.mul.Scalar"(%1150, %330) : (!torch.vtensor<[16,128,128],f32>, !torch.float) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x873bbc0) { | |
| %1152 = "torch.aten.mul.Scalar"(%436, %331) : (!torch.vtensor<[16,1,128],f32>, !torch.float) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x873bcd0) { | |
| %1153 = "torch.aten.add.Tensor"(%1151, %1152, %2) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
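
Annotation: %1151 through %1153 are the decomposed aten.baddbmm that BLOOM uses to fold the ALiBi bias into the scores: scores * alpha plus the per-head bias %436 ([16,1,128], broadcast over the query dim) * beta. The actual alpha/beta (%330, %331) are defined outside this excerpt; in BLOOM they derive from 1/sqrt(head_dim), so the sketch assumes head_dim = 64:

import torch

q2 = torch.randn(16, 128, 64)
k2 = torch.randn(16, 64, 128)
alibi = torch.randn(16, 1, 128)          # %436: one bias row per head, per key
alpha = beta = 1.0 / 8.0                 # hypothetical: 1/sqrt(64)

out = torch.bmm(q2, k2) * alpha + alibi * beta        # %1151, %1152, %1153
ref = torch.baddbmm(alibi, q2, k2, beta=beta, alpha=alpha)
assert torch.allclose(out, ref, atol=1e-5)
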
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873be00) { | |
| %1154 = "torch.aten.view"(%1153, %505) : (!torch.vtensor<[16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x873bf10) { | |
| %1155 = "torch.aten.mul.Scalar"(%1154, %332) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x873c020) { | |
| %1156 = "torch.aten.add.Tensor"(%1155, %462, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x873c150) { | |
| %1157 = "torch.aten.clone"(%291, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.maximum'(0x873c260) { | |
| %1158 = "torch.aten.maximum"(%1156, %1157) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
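
Annotation: %1154 through %1158 apply the additive causal mask %462 (large negative values at masked positions) and then clamp the logits from below with maximum() against the 0-d constant %291, keeping masked logits finite before the softmax. The mul.Scalar by the int %332 and the contents of %462/%291 are defined outside this excerpt; the sketch assumes %332 = 1 and %291 = float32 min:

import torch

scores = torch.randn(1, 16, 128, 128)          # %1154
fmin = torch.finfo(torch.float32).min
mask = torch.zeros(1, 1, 128, 128)             # %462, hypothetical contents
mask[..., 64:] = fmin                          # pretend positions >= 64 are masked

floor = torch.tensor(fmin)                     # %1157: clone of the 0-d %291
clamped = torch.maximum(scores * 1 + mask, floor)   # %1155, %1156, %1158
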
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.max.dim'(0x859b1a0) { | |
| %1159:2 = "torch.aten.max.dim"(%1158, %303, %3) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool) -> (!torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>) | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x87080e0) { | |
| %1160 = "torch.aten.sub.Tensor"(%1158, %1159#0, %312) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.exp'(0x8708210) { | |
| %1161 = "torch.aten.exp"(%1160) : (!torch.vtensor<[1,16,128,128],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8708300) { | |
| %1162 = "torch.prim.ListConstruct"(%303) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x873b000) { | |
| %1163 = "torch.aten.sum.dim_IntList"(%1161, %1162, %3, %300) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[1,16,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Tensor'(0x8708450) { | |
| %1164 = "torch.aten.div.Tensor"(%1161, %1163) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
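
Annotation: %1159 through %1164 are the numerically stable softmax over the last dim: max.dim with keepdim, subtract, exp, sum.dim_IntList, divide. (The sub.Tensor alpha %312 is the same float constant assumed to be 1.0 in the GELU fragment above.)

import torch

def decomposed_softmax(x, dim=-1):
    m, _ = x.max(dim=dim, keepdim=True)        # %1159 max.dim -> (values, indices)
    e = torch.exp(x - m)                       # %1160 sub.Tensor, %1161 exp
    return e / e.sum(dim=dim, keepdim=True)    # %1163 sum, %1164 div.Tensor

x = torch.randn(1, 16, 128, 128)
assert torch.allclose(decomposed_softmax(x), torch.softmax(x, -1), atol=1e-6)
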
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8734a10) { | |
| %1165 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x87086b0) { | |
| %1166 = "torch.aten.empty.memory_format"(%1165, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x873d3a0) { | |
| %1167 = "torch.valsem.aten.copy"(%1166, %462, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bitwise_not'(0x873d4d0) { | |
| %1168 = "torch.aten.bitwise_not"(%1167) : (!torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x873d5c0) { | |
| %1169 = "torch.aten.mul.Tensor"(%1164, %1168) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
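
Annotation: %1165 through %1169 look like the lowering of a masked zeroing of the probabilities, probs * ~mask.bool(): empty.memory_format plus torch-mlir's value-semantics copy materialize the f32 -> i1 cast of the mask %462, bitwise_not inverts it, and the final mul.Tensor zeroes masked entries (the boolean operand promotes to 0.0/1.0). A sketch under the assumption that %462 is nonzero exactly at masked positions:

import torch

probs = torch.rand(1, 16, 128, 128)            # %1164, softmax output
mask_f32 = torch.zeros(1, 1, 128, 128)         # %462, hypothetical contents
mask_f32[..., 64:] = 1.0

mask_bool = torch.empty(1, 1, 128, 128, dtype=torch.bool)   # %1166
mask_bool.copy_(mask_f32)                      # %1167 valsem.aten.copy: the cast
probs = probs * ~mask_bool                     # %1168 bitwise_not, %1169 mul
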
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873d6d0) { | |
| %1170 = "torch.aten.view"(%1169, %522) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x873d7e0) { | |
| %1171 = "torch.aten.transpose.int"(%1145, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873d910) { | |
| %1172 = "torch.aten.view"(%1171, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x873da20) { | |
| %1173 = "torch.aten.bmm"(%1170, %1172) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873db30) { | |
| %1174 = "torch.aten.view"(%1173, %527) : (!torch.vtensor<[16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x873dc40) { | |
| %1175 = "torch.aten.permute"(%1174, %529) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x873dd50) { | |
| %1176 = "torch.aten.clone"(%1175, %306) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873de60) { | |
| %1177 = "torch.aten.view"(%1176, %532) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
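
Annotation: %1170 through %1177 finish the attention: probs @ V as a bmm in the flattened [16,...] layout, then the context is permuted back to [1,128,16,64], made contiguous (the clone with memory_format %306), and viewed to [1,128,1024], merging the 16 heads:

import torch

probs = torch.rand(1, 16, 128, 128)
v = torch.randn(1, 128, 16, 64)                            # %1145

ctx = torch.bmm(probs.view(16, 128, 128),                  # %1170
                v.transpose(1, 2).reshape(16, 128, 64))    # %1171, %1172, %1173
ctx = ctx.view(1, 16, 128, 64).permute(0, 2, 1, 3)         # %1174, %1175
ctx = ctx.contiguous().view(1, 128, 1024)                  # %1176 clone, %1177
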
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x873df70) { | |
| %1178 = "torch.aten.transpose.int"(%218, %306, %2) : (!torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873e0a0) { | |
| %1179 = "torch.aten.view"(%1177, %482) : (!torch.vtensor<[1,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x873e1b0) { | |
| %1180 = "torch.aten.mm"(%1179, %1178) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x873e2c0) { | |
| %1181 = "torch.aten.mul.Scalar"(%217, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x873e3d0) { | |
| %1182 = "torch.aten.add.Tensor"(%1181, %1180, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x873e500) { | |
| %1183 = "torch.aten.view"(%1182, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x873e610) { | |
| %1184 = "torch.aten.add.Tensor"(%1117, %1183, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x873e740) { | |
| %1185 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x873e830) { | |
| %1186 = "torch.aten.sum.dim_IntList"(%1184, %1185, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x873e980) { | |
| %1187 = "torch.aten.div.Scalar"(%1186, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x873ea90) { | |
| %1188 = "torch.aten.size.int"(%1184, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x873eba0) { | |
| %1189 = "torch.prim.ListConstruct"(%1188, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x873ecd0) { | |
| %1190 = "torch.aten.broadcast_to"(%1187, %1189) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8712380) { | |
| %1191 = "torch.aten.sub.Tensor"(%1184, %1190, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87124b0) { | |
| %1192 = "torch.aten.mul.Tensor"(%1191, %1191) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x87125c0) { | |
| %1193 = "torch.aten.sum.dim_IntList"(%1192, %1185, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8712710) { | |
| %1194 = "torch.aten.div.Scalar"(%1193, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8712820) { | |
| %1195 = "torch.aten.add.Scalar"(%1194, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x8712950) { | |
| %1196 = "torch.aten.rsqrt"(%1195) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8712a40) { | |
| %1197 = "torch.aten.size.int"(%1184, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x873fdb0) { | |
| %1198 = "torch.prim.ListConstruct"(%1197, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x873fee0) { | |
| %1199 = "torch.aten.broadcast_to"(%1196, %1198) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x873fff0) { | |
| %1200 = "torch.aten.mul.Tensor"(%1191, %1199) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8740100) { | |
| %1201 = "torch.aten.mul.Tensor"(%1200, %216) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8741220) { | |
| %1202 = "torch.aten.add.Tensor"(%1201, %215, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8741350) { | |
| %1203 = "torch.aten.transpose.int"(%214, %306, %2) : (!torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8741480) { | |
| %1204 = "torch.aten.view"(%1202, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8741590) { | |
| %1205 = "torch.aten.mm"(%1204, %1203) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x87416a0) { | |
| %1206 = "torch.aten.mul.Scalar"(%213, %2) : (!torch.vtensor<[4096],f32>, !torch.int) -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x87417b0) { | |
| %1207 = "torch.aten.add.Tensor"(%1206, %1205, %2) : (!torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87418e0) { | |
| %1208 = "torch.aten.view"(%1207, %564) : (!torch.vtensor<[128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x87419f0) { | |
| %1209 = "torch.aten.mul.Scalar"(%1208, %318) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8741b00) { | |
| %1210 = "torch.aten.mul.Scalar"(%1208, %319) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8741c10) { | |
| %1211 = "torch.aten.mul.Scalar"(%1208, %320) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8741d20) { | |
| %1212 = "torch.aten.mul.Tensor"(%1211, %1208) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8741e30) { | |
| %1213 = "torch.aten.add.Scalar"(%1212, %2, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8741f60) { | |
| %1214 = "torch.aten.mul.Tensor"(%1210, %1213) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.tanh'(0x8742070) { | |
| %1215 = "torch.aten.tanh"(%1214) : (!torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8742160) { | |
| %1216 = "torch.aten.add.Scalar"(%1215, %312, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8742290) { | |
| %1217 = "torch.aten.mul.Tensor"(%1209, %1216) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x87423a0) { | |
| %1218 = "torch.aten.transpose.int"(%212, %306, %2) : (!torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870f370) { | |
| %1219 = "torch.aten.view"(%1217, %576) : (!torch.vtensor<[1,128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x870f480) { | |
| %1220 = "torch.aten.mm"(%1219, %1218) : (!torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x870f590) { | |
| %1221 = "torch.aten.mul.Scalar"(%211, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870f6a0) { | |
| %1222 = "torch.aten.add.Tensor"(%1221, %1220, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x870f7d0) { | |
| %1223 = "torch.aten.view"(%1222, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x870f8e0) { | |
| %1224 = "torch.aten.add.Tensor"(%1184, %1223, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x870fa10) { | |
| %1225 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8743480) { | |
| %1226 = "torch.aten.sum.dim_IntList"(%1224, %1225, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x87435b0) { | |
| %1227 = "torch.aten.div.Scalar"(%1226, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x87436c0) { | |
| %1228 = "torch.aten.size.int"(%1224, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87437d0) { | |
| %1229 = "torch.prim.ListConstruct"(%1228, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8743900) { | |
| %1230 = "torch.aten.broadcast_to"(%1227, %1229) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8743a10) { | |
| %1231 = "torch.aten.sub.Tensor"(%1224, %1230, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8743b40) { | |
| %1232 = "torch.aten.mul.Tensor"(%1231, %1231) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8743c50) { | |
| %1233 = "torch.aten.sum.dim_IntList"(%1232, %1225, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8743da0) { | |
| %1234 = "torch.aten.div.Scalar"(%1233, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8743eb0) { | |
| %1235 = "torch.aten.add.Scalar"(%1234, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x8743fe0) { | |
| %1236 = "torch.aten.rsqrt"(%1235) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x87440d0) { | |
| %1237 = "torch.aten.size.int"(%1224, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87441e0) { | |
| %1238 = "torch.prim.ListConstruct"(%1237, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8744310) { | |
| %1239 = "torch.aten.broadcast_to"(%1236, %1238) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8744420) { | |
| %1240 = "torch.aten.mul.Tensor"(%1231, %1239) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8744530) { | |
| %1241 = "torch.aten.mul.Tensor"(%1240, %210) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8744640) { | |
| %1242 = "torch.aten.add.Tensor"(%1241, %209, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8744770) { | |
| %1243 = "torch.aten.transpose.int"(%208, %306, %2) : (!torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87448a0) { | |
| %1244 = "torch.aten.view"(%1242, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x87449b0) { | |
| %1245 = "torch.aten.mm"(%1244, %1243) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32>) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8744ac0) { | |
| %1246 = "torch.aten.mul.Scalar"(%207, %2) : (!torch.vtensor<[3072],f32>, !torch.int) -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8744bd0) { | |
| %1247 = "torch.aten.add.Tensor"(%1246, %1245, %2) : (!torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8744d00) { | |
| %1248 = "torch.aten.view"(%1247, %487) : (!torch.vtensor<[128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8744e10) { | |
| %1249 = "torch.aten.view"(%1248, %489) : (!torch.vtensor<[1,128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,192],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8745010) { | |
| %1250 = "torch.aten.slice.Tensor"(%1249, %303, %306, %315, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8745180) { | |
| %1251 = "torch.aten.slice.Tensor"(%1249, %303, %315, %1, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x87452f0) { | |
| %1252 = "torch.aten.slice.Tensor"(%1249, %303, %1, %314, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8745460) { | |
| %1253 = "torch.aten.transpose.int"(%1250, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87465a0) { | |
| %1254 = "torch.aten.view"(%1253, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x87466b0) { | |
| %1255 = "torch.aten.permute"(%1251, %497) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87467c0) { | |
| %1256 = "torch.aten.view"(%1255, %499) : (!torch.vtensor<[1,16,64,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x87468d0) { | |
| %1257 = "torch.aten.bmm"(%1254, %1256) : (!torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x87469e0) { | |
| %1258 = "torch.aten.mul.Scalar"(%1257, %333) : (!torch.vtensor<[16,128,128],f32>, !torch.float) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8746af0) { | |
| %1259 = "torch.aten.mul.Scalar"(%436, %334) : (!torch.vtensor<[16,1,128],f32>, !torch.float) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8746c00) { | |
| %1260 = "torch.aten.add.Tensor"(%1258, %1259, %2) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
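| //===-- note: scaled scores + alibi bias ---------===// | |
| The mul.Scalar / bmm / mul.Scalar / add.Tensor run above is the decomposed | |
| form of a baddbmm: scores = alpha * (q @ k^T) + beta * alibi, with the | |
| [16,1,128] tensor %436 broadcasting over the query dimension (consistent | |
| with BLOOM's ALiBi bias). Hedged sketch; the actual scalar values behind | |
| %333/%334 are not printed in this trace: | |
|     import torch | |
|     q   = torch.randn(16, 128, 64) | |
|     k_t = torch.randn(16, 64, 128) | |
|     alibi = torch.randn(16, 1, 128)             # stands in for %436 | |
|     alpha, beta = 0.125, 1.0                    # illustrative values only | |
|     scores = alpha * torch.bmm(q, k_t) + beta * alibi | |
|     # fused equivalent: torch.baddbmm(alibi, q, k_t, beta=beta, alpha=alpha) | |
| //===-------------------------------------------===// | |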
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8746d30) { | |
| %1261 = "torch.aten.view"(%1260, %505) : (!torch.vtensor<[16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8746e40) { | |
| %1262 = "torch.aten.mul.Scalar"(%1261, %335) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8746f50) { | |
| %1263 = "torch.aten.add.Tensor"(%1262, %462, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x8747080) { | |
| %1264 = "torch.aten.clone"(%291, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.maximum'(0x8747190) { | |
| %1265 = "torch.aten.maximum"(%1263, %1264) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.max.dim'(0x87472b0) { | |
| %1266:2 = "torch.aten.max.dim"(%1265, %303, %3) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool) -> (!torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>) | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8747440) { | |
| %1267 = "torch.aten.sub.Tensor"(%1265, %1266#0, %312) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.exp'(0x8747570) { | |
| %1268 = "torch.aten.exp"(%1267) : (!torch.vtensor<[1,16,128,128],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8747660) { | |
| %1269 = "torch.prim.ListConstruct"(%303) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8744f20) { | |
| %1270 = "torch.aten.sum.dim_IntList"(%1268, %1269, %3, %300) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[1,16,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Tensor'(0x87477b0) { | |
| %1271 = "torch.aten.div.Tensor"(%1268, %1270) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
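| //===-- note: decomposed softmax -----------------===// | |
| After the mask add and the aten.maximum clamp against the scalar %1264 | |
| (a dtype-minimum floor, as in BLOOM's attention), the max.dim / sub.Tensor | |
| / exp / sum.dim_IntList / div.Tensor chain is the standard numerically | |
| stable softmax over the last dimension. Sketch (function name is ours): | |
|     import torch | |
|     def stable_softmax(scores, dim=-1): | |
|         m, _ = scores.max(dim=dim, keepdim=True)    # aten.max.dim | |
|         e = (scores - m).exp()                      # aten.sub.Tensor + aten.exp | |
|         return e / e.sum(dim=dim, keepdim=True)     # aten.sum.dim_IntList + aten.div.Tensor | |
| //===-------------------------------------------===// | |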
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87085c0) { | |
| %1272 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x8747a10) { | |
| %1273 = "torch.aten.empty.memory_format"(%1272, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x8747ba0) { | |
| %1274 = "torch.valsem.aten.copy"(%1273, %462, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bitwise_not'(0x8747cd0) { | |
| %1275 = "torch.aten.bitwise_not"(%1274) : (!torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8747dc0) { | |
| %1276 = "torch.aten.mul.Tensor"(%1271, %1275) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
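| //===-- note: boolean mask zeroing ---------------===// | |
| The empty.memory_format(i1) / valsem.aten.copy / bitwise_not / mul.Tensor | |
| sequence builds a boolean mask from the f32 mask %462 (nonzero -> True), | |
| inverts it, and zeroes the corresponding attention probabilities, i.e. a | |
| masked_fill with zero. Sketch under that reading: | |
|     import torch | |
|     probs = torch.rand(1, 16, 128, 128) | |
|     mask_f32 = torch.randint(0, 2, (1, 1, 128, 128)).float()  # stands in for %462 | |
|     mask = torch.empty(1, 1, 128, 128, dtype=torch.bool)      # aten.empty.memory_format | |
|     mask.copy_(mask_f32)                                       # valsem.aten.copy, nonzero -> True | |
|     probs = probs * ~mask                                      # aten.bitwise_not + aten.mul.Tensor | |
| //===-------------------------------------------===// | |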
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8748ee0) { | |
| %1277 = "torch.aten.view"(%1276, %522) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8748ff0) { | |
| %1278 = "torch.aten.transpose.int"(%1252, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8749120) { | |
| %1279 = "torch.aten.view"(%1278, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x8749230) { | |
| %1280 = "torch.aten.bmm"(%1277, %1279) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8749340) { | |
| %1281 = "torch.aten.view"(%1280, %527) : (!torch.vtensor<[16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x8749450) { | |
| %1282 = "torch.aten.permute"(%1281, %529) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x8749560) { | |
| %1283 = "torch.aten.clone"(%1282, %306) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8749670) { | |
| %1284 = "torch.aten.view"(%1283, %532) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
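| //===-- note: context and head merge -------------===// | |
| The bmm of the masked probabilities with the transposed value slice, | |
| followed by view / permute / clone / view, computes the attention context | |
| and merges the 16 heads back into the 1024-wide hidden dimension: | |
|     import torch | |
|     probs = torch.rand(1, 16, 128, 128) | |
|     v = torch.randn(1, 128, 16, 64) | |
|     ctx = torch.bmm(probs.view(16, 128, 128), | |
|                     v.transpose(1, 2).reshape(16, 128, 64))   # aten.bmm -> [16,128,64] | |
|     ctx = (ctx.view(1, 16, 128, 64) | |
|               .permute(0, 2, 1, 3)                            # aten.permute | |
|               .contiguous()                                   # aten.clone | |
|               .view(1, 128, 16 * 64))                         # aten.view -> [1,128,1024] | |
| //===-------------------------------------------===// | |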
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8749780) { | |
| %1285 = "torch.aten.transpose.int"(%206, %306, %2) : (!torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x874a8c0) { | |
| %1286 = "torch.aten.view"(%1284, %482) : (!torch.vtensor<[1,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x874a9d0) { | |
| %1287 = "torch.aten.mm"(%1286, %1285) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x874aae0) { | |
| %1288 = "torch.aten.mul.Scalar"(%205, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x874abf0) { | |
| %1289 = "torch.aten.add.Tensor"(%1288, %1287, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x874ad20) { | |
| %1290 = "torch.aten.view"(%1289, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x874ae30) { | |
| %1291 = "torch.aten.add.Tensor"(%1224, %1290, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
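| //===-- note: linear-layer decomposition ---------===// | |
| The transpose.int / view / mm / mul.Scalar / add.Tensor / view run above is | |
| the decomposed form of a biased linear layer (aten.linear / addmm): flatten | |
| the batch, compute beta*bias + x @ W^T, reshape back, then add the residual | |
| (%1291). Sketch: | |
|     import torch | |
|     x = torch.randn(1, 128, 1024) | |
|     W = torch.randn(1024, 1024)                 # stored [out_features, in_features] | |
|     b = torch.randn(1024) | |
|     y = torch.mm(x.view(128, 1024), W.t())      # aten.transpose.int + aten.view + aten.mm | |
|     y = b * 1 + y                               # aten.mul.Scalar(beta=1) + aten.add.Tensor | |
|     y = y.view(1, 128, 1024)                    # aten.view | |
| //===-------------------------------------------===// | |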
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x874bf70) { | |
| %1292 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x874c060) { | |
| %1293 = "torch.aten.sum.dim_IntList"(%1291, %1292, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x874c1b0) { | |
| %1294 = "torch.aten.div.Scalar"(%1293, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x874c2c0) { | |
| %1295 = "torch.aten.size.int"(%1291, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x874d3e0) { | |
| %1296 = "torch.prim.ListConstruct"(%1295, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x874d510) { | |
| %1297 = "torch.aten.broadcast_to"(%1294, %1296) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x874d620) { | |
| %1298 = "torch.aten.sub.Tensor"(%1291, %1297, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x874d750) { | |
| %1299 = "torch.aten.mul.Tensor"(%1298, %1298) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x874d860) { | |
| %1300 = "torch.aten.sum.dim_IntList"(%1299, %1292, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x874d9b0) { | |
| %1301 = "torch.aten.div.Scalar"(%1300, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x874dac0) { | |
| %1302 = "torch.aten.add.Scalar"(%1301, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x874dbf0) { | |
| %1303 = "torch.aten.rsqrt"(%1302) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x874dce0) { | |
| %1304 = "torch.aten.size.int"(%1291, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x874ddf0) { | |
| %1305 = "torch.prim.ListConstruct"(%1304, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x874df20) { | |
| %1306 = "torch.aten.broadcast_to"(%1303, %1305) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x874e030) { | |
| %1307 = "torch.aten.mul.Tensor"(%1298, %1306) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x874e140) { | |
| %1308 = "torch.aten.mul.Tensor"(%1307, %204) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x874e250) { | |
| %1309 = "torch.aten.add.Tensor"(%1308, %203, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
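| //===-- note: decomposed LayerNorm ---------------===// | |
| The sum/div (mean), broadcast, sub, mul (square), sum/div (variance), | |
| add.Scalar (eps, %305), rsqrt, mul, mul (weight), add (bias) chain is | |
| LayerNorm over the 1024-wide last dimension. The size.int + ListConstruct | |
| + broadcast_to pairs exist to expand the [?,128,1] statistics across the | |
| dynamic leading dimension. Sketch (the eps value is illustrative): | |
|     import torch | |
|     def layer_norm_decomposed(x, weight, bias, eps=1e-5): | |
|         n = x.shape[-1] | |
|         mean = x.sum(-1, keepdim=True) / n            # aten.sum.dim_IntList + aten.div.Scalar | |
|         xc = x - mean.expand_as(x)                    # aten.broadcast_to + aten.sub.Tensor | |
|         var = (xc * xc).sum(-1, keepdim=True) / n     # biased variance | |
|         return xc * torch.rsqrt(var + eps).expand_as(x) * weight + bias | |
| //===-------------------------------------------===// | |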
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x874e380) { | |
| %1310 = "torch.aten.transpose.int"(%202, %306, %2) : (!torch.vtensor<[4096,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x874e4b0) { | |
| %1311 = "torch.aten.view"(%1309, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x874e5c0) { | |
| %1312 = "torch.aten.mm"(%1311, %1310) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,4096],f32>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x874e6d0) { | |
| %1313 = "torch.aten.mul.Scalar"(%201, %2) : (!torch.vtensor<[4096],f32>, !torch.int) -> !torch.vtensor<[4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x874e7e0) { | |
| %1314 = "torch.aten.add.Tensor"(%1313, %1312, %2) : (!torch.vtensor<[4096],f32>, !torch.vtensor<[128,4096],f32>, !torch.int) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x874e910) { | |
| %1315 = "torch.aten.view"(%1314, %564) : (!torch.vtensor<[128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x874ea20) { | |
| %1316 = "torch.aten.mul.Scalar"(%1315, %318) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x874eb30) { | |
| %1317 = "torch.aten.mul.Scalar"(%1315, %319) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x874ec40) { | |
| %1318 = "torch.aten.mul.Scalar"(%1315, %320) : (!torch.vtensor<[1,128,4096],f32>, !torch.float) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x874ed50) { | |
| %1319 = "torch.aten.mul.Tensor"(%1318, %1315) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x874ee60) { | |
| %1320 = "torch.aten.add.Scalar"(%1319, %2, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x874ef90) { | |
| %1321 = "torch.aten.mul.Tensor"(%1317, %1320) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.tanh'(0x874f0a0) { | |
| %1322 = "torch.aten.tanh"(%1321) : (!torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x874f190) { | |
| %1323 = "torch.aten.add.Scalar"(%1322, %312, %2) : (!torch.vtensor<[1,128,4096],f32>, !torch.float, !torch.int) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x874f2c0) { | |
| %1324 = "torch.aten.mul.Tensor"(%1316, %1323) : (!torch.vtensor<[1,128,4096],f32>, !torch.vtensor<[1,128,4096],f32>) -> !torch.vtensor<[1,128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
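| //===-- note: tanh-GELU approximation ------------===// | |
| The three mul.Scalar ops on the same [1,128,4096] tensor, followed by the | |
| mul / add.Scalar / mul / tanh / add.Scalar / mul chain, match the tanh GELU | |
| approximation 0.5*h*(1 + tanh(sqrt(2/pi)*(h + 0.044715*h^3))) used in | |
| BLOOM's MLP. The constants behind %318/%319/%320 are not printed in this | |
| trace, so the values below are the standard ones, stated as an assumption: | |
|     import torch | |
|     def gelu_tanh(h): | |
|         return (0.5 * h) * (torch.tanh((0.7978845608 * h) * ((0.044715 * h) * h + 1.0)) + 1.0) | |
| //===-------------------------------------------===// | |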
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x874f3d0) { | |
| %1325 = "torch.aten.transpose.int"(%200, %306, %2) : (!torch.vtensor<[1024,4096],f32>, !torch.int, !torch.int) -> !torch.vtensor<[4096,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x874f500) { | |
| %1326 = "torch.aten.view"(%1324, %576) : (!torch.vtensor<[1,128,4096],f32>, !torch.list<int>) -> !torch.vtensor<[128,4096],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x874f610) { | |
| %1327 = "torch.aten.mm"(%1326, %1325) : (!torch.vtensor<[128,4096],f32>, !torch.vtensor<[4096,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x874f720) { | |
| %1328 = "torch.aten.mul.Scalar"(%199, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x874f830) { | |
| %1329 = "torch.aten.add.Tensor"(%1328, %1327, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x874f960) { | |
| %1330 = "torch.aten.view"(%1329, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x874fa70) { | |
| %1331 = "torch.aten.add.Tensor"(%1291, %1330, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x874fba0) { | |
| %1332 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x874fc90) { | |
| %1333 = "torch.aten.sum.dim_IntList"(%1331, %1332, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x874fde0) { | |
| %1334 = "torch.aten.div.Scalar"(%1333, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x874fef0) { | |
| %1335 = "torch.aten.size.int"(%1331, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8750000) { | |
| %1336 = "torch.prim.ListConstruct"(%1335, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8750130) { | |
| %1337 = "torch.aten.broadcast_to"(%1334, %1336) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8751250) { | |
| %1338 = "torch.aten.sub.Tensor"(%1331, %1337, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8751380) { | |
| %1339 = "torch.aten.mul.Tensor"(%1338, %1338) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8751490) { | |
| %1340 = "torch.aten.sum.dim_IntList"(%1339, %1332, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x87515e0) { | |
| %1341 = "torch.aten.div.Scalar"(%1340, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x87516f0) { | |
| %1342 = "torch.aten.add.Scalar"(%1341, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x8751820) { | |
| %1343 = "torch.aten.rsqrt"(%1342) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8720ca0) { | |
| %1344 = "torch.aten.size.int"(%1331, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8720db0) { | |
| %1345 = "torch.prim.ListConstruct"(%1344, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8720ee0) { | |
| %1346 = "torch.aten.broadcast_to"(%1343, %1345) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8720ff0) { | |
| %1347 = "torch.aten.mul.Tensor"(%1338, %1346) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8721100) { | |
| %1348 = "torch.aten.mul.Tensor"(%1347, %198) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8721210) { | |
| %1349 = "torch.aten.add.Tensor"(%1348, %197, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8721340) { | |
| %1350 = "torch.aten.transpose.int"(%196, %306, %2) : (!torch.vtensor<[3072,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8721470) { | |
| %1351 = "torch.aten.view"(%1349, %482) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8721580) { | |
| %1352 = "torch.aten.mm"(%1351, %1350) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,3072],f32>) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8721690) { | |
| %1353 = "torch.aten.mul.Scalar"(%195, %2) : (!torch.vtensor<[3072],f32>, !torch.int) -> !torch.vtensor<[3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x87217a0) { | |
| %1354 = "torch.aten.add.Tensor"(%1353, %1352, %2) : (!torch.vtensor<[3072],f32>, !torch.vtensor<[128,3072],f32>, !torch.int) -> !torch.vtensor<[128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87218d0) { | |
| %1355 = "torch.aten.view"(%1354, %487) : (!torch.vtensor<[128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,3072],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x87229f0) { | |
| %1356 = "torch.aten.view"(%1355, %489) : (!torch.vtensor<[1,128,3072],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,192],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8722bf0) { | |
| %1357 = "torch.aten.slice.Tensor"(%1356, %303, %306, %315, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8722d60) { | |
| %1358 = "torch.aten.slice.Tensor"(%1356, %303, %315, %1, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.slice.Tensor'(0x8722ed0) { | |
| %1359 = "torch.aten.slice.Tensor"(%1356, %303, %1, %314, %2) : (!torch.vtensor<[1,128,16,192],f32>, !torch.int, !torch.int, !torch.int, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x8723040) { | |
| %1360 = "torch.aten.transpose.int"(%1357, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8723170) { | |
| %1361 = "torch.aten.view"(%1360, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x8723280) { | |
| %1362 = "torch.aten.permute"(%1358, %497) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8723390) { | |
| %1363 = "torch.aten.view"(%1362, %499) : (!torch.vtensor<[1,16,64,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,64,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x87234a0) { | |
| %1364 = "torch.aten.bmm"(%1361, %1363) : (!torch.vtensor<[16,128,64],f32>, !torch.vtensor<[16,64,128],f32>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x87235b0) { | |
| %1365 = "torch.aten.mul.Scalar"(%1364, %336) : (!torch.vtensor<[16,128,128],f32>, !torch.float) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x87236c0) { | |
| %1366 = "torch.aten.mul.Scalar"(%436, %316) : (!torch.vtensor<[16,1,128],f32>, !torch.float) -> !torch.vtensor<[16,1,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x87237d0) { | |
| %1367 = "torch.aten.add.Tensor"(%1365, %1366, %2) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,1,128],f32>, !torch.int) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8723900) { | |
| %1368 = "torch.aten.view"(%1367, %505) : (!torch.vtensor<[16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8723a10) { | |
| %1369 = "torch.aten.mul.Scalar"(%1368, %337) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8723b20) { | |
| %1370 = "torch.aten.add.Tensor"(%1369, %462, %2) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],f32>, !torch.int) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x8723c50) { | |
| %1371 = "torch.aten.clone"(%291, %300) : (!torch.vtensor<[],f32>, !torch.none) -> !torch.vtensor<[],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.maximum'(0x8724d70) { | |
| %1372 = "torch.aten.maximum"(%1370, %1371) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.max.dim'(0x8724e90) { | |
| %1373:2 = "torch.aten.max.dim"(%1372, %303, %3) : (!torch.vtensor<[1,16,128,128],f32>, !torch.int, !torch.bool) -> (!torch.vtensor<[1,16,128,1],f32>, !torch.vtensor<[1,16,128,1],si64>) | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8725020) { | |
| %1374 = "torch.aten.sub.Tensor"(%1372, %1373#0, %312) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>, !torch.float) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.exp'(0x8725150) { | |
| %1375 = "torch.aten.exp"(%1374) : (!torch.vtensor<[1,16,128,128],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8725240) { | |
| %1376 = "torch.prim.ListConstruct"(%303) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x8722b00) { | |
| %1377 = "torch.aten.sum.dim_IntList"(%1375, %1376, %3, %300) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[1,16,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Tensor'(0x8725390) { | |
| %1378 = "torch.aten.div.Tensor"(%1375, %1377) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,16,128,1],f32>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8747920) { | |
| %1379 = "torch.prim.ListConstruct"(%2, %2, %1, %1) : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.empty.memory_format'(0x87255f0) { | |
| %1380 = "torch.aten.empty.memory_format"(%1379, %346, %300, %300, %300, %300) : (!torch.list<int>, !torch.int, !torch.none, !torch.none, !torch.none, !torch.none) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.valsem.aten.copy'(0x8725780) { | |
| %1381 = "torch.valsem.aten.copy"(%1380, %462, %302) : (!torch.vtensor<[1,1,128,128],i1>, !torch.vtensor<[1,1,128,128],f32>, !torch.bool) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bitwise_not'(0x87268c0) { | |
| %1382 = "torch.aten.bitwise_not"(%1381) : (!torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,1,128,128],i1> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x87269b0) { | |
| %1383 = "torch.aten.mul.Tensor"(%1378, %1382) : (!torch.vtensor<[1,16,128,128],f32>, !torch.vtensor<[1,1,128,128],i1>) -> !torch.vtensor<[1,16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8726ac0) { | |
| %1384 = "torch.aten.view"(%1383, %522) : (!torch.vtensor<[1,16,128,128],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,128],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x875d8c0) { | |
| %1385 = "torch.aten.transpose.int"(%1359, %2, %307) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x875e9a0) { | |
| %1386 = "torch.aten.view"(%1385, %495) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.bmm'(0x875ea90) { | |
| %1387 = "torch.aten.bmm"(%1384, %1386) : (!torch.vtensor<[16,128,128],f32>, !torch.vtensor<[16,128,64],f32>) -> !torch.vtensor<[16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x875eba0) { | |
| %1388 = "torch.aten.view"(%1387, %527) : (!torch.vtensor<[16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,16,128,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.permute'(0x875ecb0) { | |
| %1389 = "torch.aten.permute"(%1388, %529) : (!torch.vtensor<[1,16,128,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.clone'(0x875edc0) { | |
| %1390 = "torch.aten.clone"(%1389, %306) : (!torch.vtensor<[1,128,16,64],f32>, !torch.int) -> !torch.vtensor<[1,128,16,64],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x875eed0) { | |
| %1391 = "torch.aten.view"(%1390, %532) : (!torch.vtensor<[1,128,16,64],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.transpose.int'(0x875efe0) { | |
| %1392 = "torch.aten.transpose.int"(%194, %306, %2) : (!torch.vtensor<[1024,1024],f32>, !torch.int, !torch.int) -> !torch.vtensor<[1024,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8760120) { | |
| %1393 = "torch.aten.view"(%1391, %482) : (!torch.vtensor<[1,128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mm'(0x8760230) { | |
| %1394 = "torch.aten.mm"(%1393, %1392) : (!torch.vtensor<[128,1024],f32>, !torch.vtensor<[1024,1024],f32>) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Scalar'(0x8760340) { | |
| %1395 = "torch.aten.mul.Scalar"(%193, %2) : (!torch.vtensor<[1024],f32>, !torch.int) -> !torch.vtensor<[1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8760450) { | |
| %1396 = "torch.aten.add.Tensor"(%1395, %1394, %2) : (!torch.vtensor<[1024],f32>, !torch.vtensor<[128,1024],f32>, !torch.int) -> !torch.vtensor<[128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.view'(0x8760580) { | |
| %1397 = "torch.aten.view"(%1396, %532) : (!torch.vtensor<[128,1024],f32>, !torch.list<int>) -> !torch.vtensor<[1,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Tensor'(0x8760690) { | |
| %1398 = "torch.aten.add.Tensor"(%1331, %1397, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[1,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
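| //===-- note ------------------------------------===// | |
| Beginning at the transpose of the next fused-QKV weight (%1350) and ending | |
| at the residual add producing %1398, the trace repeats the attention / | |
| mask / softmax / context / dense sequence annotated above with identical | |
| shapes and the next decoder layer's parameters; the earlier notes apply | |
| unchanged. | |
| //===-------------------------------------------===// | |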
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x87607c0) { | |
| %1399 = "torch.prim.ListConstruct"(%307) : (!torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x87608b0) { | |
| %1400 = "torch.aten.sum.dim_IntList"(%1398, %1399, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x8760a00) { | |
| %1401 = "torch.aten.div.Scalar"(%1400, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8760b10) { | |
| %1402 = "torch.aten.size.int"(%1398, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.prim.ListConstruct'(0x8760c20) { | |
| %1403 = "torch.prim.ListConstruct"(%1402, %1, %0) : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.broadcast_to'(0x8760d50) { | |
| %1404 = "torch.aten.broadcast_to"(%1401, %1403) : (!torch.vtensor<[?,128,1],f32>, !torch.list<int>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sub.Tensor'(0x8760e60) { | |
| %1405 = "torch.aten.sub.Tensor"(%1398, %1404, %2) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.mul.Tensor'(0x8760f90) { | |
| %1406 = "torch.aten.mul.Tensor"(%1405, %1405) : (!torch.vtensor<[?,128,1024],f32>, !torch.vtensor<[?,128,1024],f32>) -> !torch.vtensor<[?,128,1024],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.sum.dim_IntList'(0x87610a0) { | |
| %1407 = "torch.aten.sum.dim_IntList"(%1406, %1399, %3, %300) : (!torch.vtensor<[?,128,1024],f32>, !torch.list<int>, !torch.bool, !torch.none) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.div.Scalar'(0x87611f0) { | |
| %1408 = "torch.aten.div.Scalar"(%1407, %0) : (!torch.vtensor<[?,128,1],f32>, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.add.Scalar'(0x8761300) { | |
| %1409 = "torch.aten.add.Scalar"(%1408, %305, %2) : (!torch.vtensor<[?,128,1],f32>, !torch.float, !torch.int) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.rsqrt'(0x8761430) { | |
| %1410 = "torch.aten.rsqrt"(%1409) : (!torch.vtensor<[?,128,1],f32>) -> !torch.vtensor<[?,128,1],f32> | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : 'torch.aten.size.int'(0x8761520) { | |
| %1411 = "torch.aten.size.int"(%1398, %306) : (!torch.vtensor<[?,128,1024],f32>, !torch.int) -> !torch.int | |
| } -> SUCCESS : operation marked legal by the target | |
| //===-------------------------------------------===// | |
| //===-------------------------------------------===// | |
| Legalizing operation : |