module @compiled_scheduled_unet {
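// Weights are not inlined: each util.global below is bound to an external
// parameter via #stream.parameter.named<"model"::"key">, where "model" is the
// parameter scope and "key" mirrors the diffusers UNet state-dict path
// (conv_in, time_embedding, add_embedding, down_blocks.N.{resnets,attentions}, ...).
// All tensors are f16, so the weights are supplied at runtime from a parameter
// archive. A minimal invocation sketch, assuming a .vmfb and an .irpa archive
// whose file names are illustrative (not taken from this dump); the flags are
// standard iree-run-module options:
//   iree-run-module --module=compiled_scheduled_unet.vmfb \
//     --parameters=model=unet_params.irpa \
//     --function=... --input=...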
util.global private @_params.unet.conv_in.weight {noinline} = #stream.parameter.named<"model"::"unet.conv_in.weight"> : tensor<320x4x3x3xf16>
util.global private @_params.unet.conv_in.bias {noinline} = #stream.parameter.named<"model"::"unet.conv_in.bias"> : tensor<320xf16>
util.global private @_params.unet.time_embedding.linear_1.weight {noinline} = #stream.parameter.named<"model"::"unet.time_embedding.linear_1.weight"> : tensor<1280x320xf16>
util.global private @_params.unet.time_embedding.linear_1.bias {noinline} = #stream.parameter.named<"model"::"unet.time_embedding.linear_1.bias"> : tensor<1280xf16>
util.global private @_params.unet.time_embedding.linear_2.weight {noinline} = #stream.parameter.named<"model"::"unet.time_embedding.linear_2.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.time_embedding.linear_2.bias {noinline} = #stream.parameter.named<"model"::"unet.time_embedding.linear_2.bias"> : tensor<1280xf16>
util.global private @_params.unet.add_embedding.linear_1.weight {noinline} = #stream.parameter.named<"model"::"unet.add_embedding.linear_1.weight"> : tensor<1280x2816xf16>
util.global private @_params.unet.add_embedding.linear_1.bias {noinline} = #stream.parameter.named<"model"::"unet.add_embedding.linear_1.bias"> : tensor<1280xf16>
util.global private @_params.unet.add_embedding.linear_2.weight {noinline} = #stream.parameter.named<"model"::"unet.add_embedding.linear_2.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.add_embedding.linear_2.bias {noinline} = #stream.parameter.named<"model"::"unet.add_embedding.linear_2.bias"> : tensor<1280xf16>
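// add_embedding.linear_1 takes a 2816-wide input, consistent with SDXL's
// "text_time" conditioning: the 1280-dim pooled text embedding concatenated
// with six 256-dim Fourier-embedded add_time_ids (1280 + 6*256 = 2816).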
util.global private @_params.unet.down_blocks.0.resnets.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.norm1.weight"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.norm1.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.0.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.conv1.weight"> : tensor<320x320x3x3xf16>
util.global private @_params.unet.down_blocks.0.resnets.0.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.conv1.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.0.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @_params.unet.down_blocks.0.resnets.0.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.time_emb_proj.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.norm2.weight"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.norm2.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.0.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.conv2.weight"> : tensor<320x320x3x3xf16>
util.global private @_params.unet.down_blocks.0.resnets.0.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.0.conv2.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.norm1.weight"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.norm1.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.conv1.weight"> : tensor<320x320x3x3xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.conv1.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.time_emb_proj.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.norm2.weight"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.norm2.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.conv2.weight"> : tensor<320x320x3x3xf16>
util.global private @_params.unet.down_blocks.0.resnets.1.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.resnets.1.conv2.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.0.downsamplers.0.conv.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.downsamplers.0.conv.weight"> : tensor<320x320x3x3xf16>
util.global private @_params.unet.down_blocks.0.downsamplers.0.conv.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.0.downsamplers.0.conv.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.norm.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.norm.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.proj_in.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.proj_in.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<640xf16>
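// In each transformer block, attn1 is self-attention (all projections square
// over the hidden size), while attn2 is cross-attention: its to_k/to_v weights
// are 640x2048 because keys and values are computed from the 2048-dim
// text-encoder context (SDXL concatenates CLIP-L 768 + OpenCLIP-bigG 1280).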
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias"> : tensor<640xf16>
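// The feed-forward uses a GEGLU projection: ff.net.0.proj is 5120x640, i.e.
// two 2560-wide halves (value and gate) giving an inner dim of 4*640, and
// ff.net.2 maps the gated 2560 activations back to 640.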
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.proj_out.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.0.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.0.proj_out.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.norm.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.norm.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.proj_in.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.proj_in.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.proj_out.weight"> : tensor<640x640xf16>
util.global private @_params.unet.down_blocks.1.attentions.1.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.attentions.1.proj_out.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.norm1.weight"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.norm1.bias"> : tensor<320xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.conv1.weight"> : tensor<640x320x3x3xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.conv1.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.time_emb_proj.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.conv2.weight"> : tensor<640x640x3x3xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.conv2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.conv_shortcut.weight"> : tensor<640x320x1x1xf16>
util.global private @_params.unet.down_blocks.1.resnets.0.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.0.conv_shortcut.bias"> : tensor<640xf16>
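// down_blocks.1.resnets.0 changes the channel count (320 -> 640), so it
// carries a 1x1 conv_shortcut to project the residual; resnets that keep
// their channel count (e.g. down_blocks.0.resnets.*) have no shortcut weights.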
util.global private @_params.unet.down_blocks.1.resnets.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.1.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.conv1.weight"> : tensor<640x640x3x3xf16>
util.global private @_params.unet.down_blocks.1.resnets.1.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.conv1.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.1.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @_params.unet.down_blocks.1.resnets.1.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.time_emb_proj.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.resnets.1.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.conv2.weight"> : tensor<640x640x3x3xf16>
util.global private @_params.unet.down_blocks.1.resnets.1.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.resnets.1.conv2.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.1.downsamplers.0.conv.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.downsamplers.0.conv.weight"> : tensor<640x640x3x3xf16>
util.global private @_params.unet.down_blocks.1.downsamplers.0.conv.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.1.downsamplers.0.conv.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.norm.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.norm.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.proj_in.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.proj_in.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.proj_out.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.0.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.0.proj_out.bias"> : tensor<1280xf16> | |
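// Annotation (not compiler output): down_blocks.2.attentions.1 begins here, the second
// transformer module of this down block; its parameter layout mirrors attentions.0 above
// (norm and proj_in, then per-transformer-block attn1/attn2/ff weights).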
util.global private @_params.unet.down_blocks.2.attentions.1.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.norm.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.norm.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.proj_in.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.proj_in.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.proj_out.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.attentions.1.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.attentions.1.proj_out.bias"> : tensor<1280xf16>
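// proj_out above closes down_blocks.2.attentions.1: a linear 1280x1280 map back
// onto the block's feature channels. As an illustrative sketch (not part of this
// dump), a consumer function would read any of these globals with util.global.load:
//   %w = util.global.load @_params.unet.down_blocks.2.attentions.1.proj_out.weight : tensor<1280x1280xf16>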
util.global private @_params.unet.down_blocks.2.resnets.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.conv1.weight"> : tensor<1280x640x3x3xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.conv1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.time_emb_proj.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.conv2.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.conv2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.conv_shortcut.weight"> : tensor<1280x640x1x1xf16>
util.global private @_params.unet.down_blocks.2.resnets.0.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.0.conv_shortcut.bias"> : tensor<1280xf16>
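// resnets.0 widens the features 640 -> 1280 (see conv1's 1280x640x3x3 weight),
// so the residual path needs the 1x1 conv_shortcut above; time_emb_proj
// (1280x1280) injects the shared 1280-dim time/conditioning embedding between
// the norm1/conv1 and norm2/conv2 stages, matching the usual resnet-block layout.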
util.global private @_params.unet.down_blocks.2.resnets.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.1.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.conv1.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.down_blocks.2.resnets.1.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.conv1.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.1.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.1.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.time_emb_proj.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.down_blocks.2.resnets.1.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.conv2.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.down_blocks.2.resnets.1.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.down_blocks.2.resnets.1.conv2.bias"> : tensor<1280xf16>
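// resnets.1 keeps 1280 channels in and out, hence no conv_shortcut parameters.
// The up_blocks parameters start below.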
util.global private @_params.unet.up_blocks.0.attentions.0.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.norm.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.0.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.norm.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.0.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.proj_in.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.0.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.proj_in.bias"> : tensor<1280xf16>
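// up_blocks.0.attentions.0 mirrors the down-block attention layout: a
// 1280-channel norm plus a linear 1280x1280 proj_in (the 2-D weight indicates a
// linear rather than 1x1-conv projection), followed by its transformer_blocks
// with the same per-block norm/attn/ff parameter pattern as above.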
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.bias"> : tensor<1280xf16> | |
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.proj_out.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.0.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.0.proj_out.bias"> : tensor<1280xf16>
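  // Annotation (inferred from the tensor shapes, not emitted by the exporter):
  // up_blocks.0.attentions.1 is the second Transformer2DModel of this up block, with the same
  // structure as attentions.0: a GroupNorm (norm), a linear 1280x1280 proj_in (the 2-D weight
  // indicates the use_linear_projection variant rather than a 1x1 conv), ten transformer
  // blocks (0-9) with the layout noted above, then a linear proj_out.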
  util.global private @_params.unet.up_blocks.0.attentions.1.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.norm.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.norm.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.proj_in.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.proj_in.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_v.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.bias"> : tensor<10240xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight"> : tensor<1280x5120xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_k.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_v.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight"> : tensor<1280x1280xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_k.weight"> : tensor<1280x2048xf16>
  util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.1.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.proj_out.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.1.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.1.proj_out.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.up_blocks.0.attentions.2.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.norm.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.norm.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.proj_in.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.proj_in.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.proj_out.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.attentions.2.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.attentions.2.proj_out.bias"> : tensor<1280xf16>
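// Annotation (editor's note, inferred from the shapes below): the up_blocks.0 resnets take
// wider-than-1280 inputs -- norm1/conv1 see 2560 channels in resnets 0-1 and 1920 in
// resnet 2 -- consistent with the UNet up path concatenating the 1280-channel feature map
// with skip connections; the 1x1 conv_shortcut maps that concatenation back to 1280 channels
// for the residual add.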
util.global private @_params.unet.up_blocks.0.resnets.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.norm1.weight"> : tensor<2560xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.norm1.bias"> : tensor<2560xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.conv1.weight"> : tensor<1280x2560x3x3xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.conv1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.time_emb_proj.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.conv2.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.conv2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.conv_shortcut.weight"> : tensor<1280x2560x1x1xf16>
util.global private @_params.unet.up_blocks.0.resnets.0.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.0.conv_shortcut.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.norm1.weight"> : tensor<2560xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.norm1.bias"> : tensor<2560xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.conv1.weight"> : tensor<1280x2560x3x3xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.conv1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.time_emb_proj.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.conv2.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.conv2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.conv_shortcut.weight"> : tensor<1280x2560x1x1xf16>
util.global private @_params.unet.up_blocks.0.resnets.1.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.1.conv_shortcut.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.norm1.weight"> : tensor<1920xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.norm1.bias"> : tensor<1920xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.conv1.weight"> : tensor<1280x1920x3x3xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.conv1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.time_emb_proj.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.conv2.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.conv2.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.conv_shortcut.weight"> : tensor<1280x1920x1x1xf16>
util.global private @_params.unet.up_blocks.0.resnets.2.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.resnets.2.conv_shortcut.bias"> : tensor<1280xf16>
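// up_blocks.0 upsampler: a single 3x3 conv applied after 2x spatial upsampling
// (nearest-neighbor in the reference diffusers implementation).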
util.global private @_params.unet.up_blocks.0.upsamplers.0.conv.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.upsamplers.0.conv.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.up_blocks.0.upsamplers.0.conv.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.0.upsamplers.0.conv.bias"> : tensor<1280xf16>
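// up_blocks.1 attentions (0-2): 640-dim self-attention (attn1) and
// cross-attention (attn2) over the 2048-dim text-encoder context, followed by a
// GEGLU feed-forward (ff.net.0.proj is 5120 = 2 x 2560 hidden; ff.net.2 maps back to 640).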
util.global private @_params.unet.up_blocks.1.attentions.0.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.norm.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.norm.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.proj_in.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.proj_in.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.proj_out.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.0.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.0.proj_out.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.norm.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.norm.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.proj_in.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.proj_in.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.proj_out.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.1.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.1.proj_out.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.norm.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.norm.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.proj_in.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.proj_in.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_k.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_v.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_k.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_v.weight"> : tensor<640x2048xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<5120x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<5120xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.weight"> : tensor<640x2560xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.proj_out.weight"> : tensor<640x640xf16>
util.global private @_params.unet.up_blocks.1.attentions.2.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.attentions.2.proj_out.bias"> : tensor<640xf16>
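// up_blocks.1 resnets: input widths 1920, 1280, 960 from skip concatenation,
// each with a 1x1 conv_shortcut down to the block's 640 channels.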
util.global private @_params.unet.up_blocks.1.resnets.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.norm1.weight"> : tensor<1920xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.norm1.bias"> : tensor<1920xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.conv1.weight"> : tensor<640x1920x3x3xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.conv1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.time_emb_proj.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.conv2.weight"> : tensor<640x640x3x3xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.conv2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.conv_shortcut.weight"> : tensor<640x1920x1x1xf16>
util.global private @_params.unet.up_blocks.1.resnets.0.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.0.conv_shortcut.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.conv1.weight"> : tensor<640x1280x3x3xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.conv1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.time_emb_proj.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.conv2.weight"> : tensor<640x640x3x3xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.conv2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.conv_shortcut.weight"> : tensor<640x1280x1x1xf16>
util.global private @_params.unet.up_blocks.1.resnets.1.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.1.conv_shortcut.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.norm1.weight"> : tensor<960xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.norm1.bias"> : tensor<960xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.conv1.weight"> : tensor<640x960x3x3xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.conv1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.time_emb_proj.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.norm2.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.norm2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.conv2.weight"> : tensor<640x640x3x3xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.conv2.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.conv_shortcut.weight"> : tensor<640x960x1x1xf16>
util.global private @_params.unet.up_blocks.1.resnets.2.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.resnets.2.conv_shortcut.bias"> : tensor<640xf16>
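// up_blocks.1 upsampler convolution (640 -> 640, 3x3).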
util.global private @_params.unet.up_blocks.1.upsamplers.0.conv.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.upsamplers.0.conv.weight"> : tensor<640x640x3x3xf16>
util.global private @_params.unet.up_blocks.1.upsamplers.0.conv.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.1.upsamplers.0.conv.bias"> : tensor<640xf16>
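// up_blocks.2 resnets: the 320-channel, highest-resolution block (no attentions
// at this level); input widths 960, 640, 640 with 1x1 conv_shortcuts to 320.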
util.global private @_params.unet.up_blocks.2.resnets.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.norm1.weight"> : tensor<960xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.norm1.bias"> : tensor<960xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.conv1.weight"> : tensor<320x960x3x3xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.conv1.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.time_emb_proj.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.norm2.weight"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.norm2.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.conv2.weight"> : tensor<320x320x3x3xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.conv2.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.conv_shortcut.weight"> : tensor<320x960x1x1xf16>
util.global private @_params.unet.up_blocks.2.resnets.0.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.0.conv_shortcut.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.conv1.weight"> : tensor<320x640x3x3xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.conv1.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.time_emb_proj.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.norm2.weight"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.norm2.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.conv2.weight"> : tensor<320x320x3x3xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.conv2.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.conv_shortcut.weight"> : tensor<320x640x1x1xf16>
util.global private @_params.unet.up_blocks.2.resnets.1.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.1.conv_shortcut.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.norm1.weight"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.norm1.bias"> : tensor<640xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.conv1.weight"> : tensor<320x640x3x3xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.conv1.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.time_emb_proj.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.norm2.weight"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.norm2.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.conv2.weight"> : tensor<320x320x3x3xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.conv2.bias"> : tensor<320xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.conv_shortcut.weight {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.conv_shortcut.weight"> : tensor<320x640x1x1xf16>
util.global private @_params.unet.up_blocks.2.resnets.2.conv_shortcut.bias {noinline} = #stream.parameter.named<"model"::"unet.up_blocks.2.resnets.2.conv_shortcut.bias"> : tensor<320xf16>
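// mid_block attention: 1280-dim transformer blocks with the same self-/cross-
// attention layout and GEGLU feed-forward (ff.net.0.proj is 10240 = 2 x 5120).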
util.global private @_params.unet.mid_block.attentions.0.norm.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.norm.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.norm.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.norm.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.proj_in.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.proj_in.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.proj_in.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.proj_in.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.0.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.0.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_k.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_v.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_k.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_v.weight"> : tensor<1280x2048xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight"> : tensor<10240x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.bias"> : tensor<10240xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight"> : tensor<1280x5120xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.1.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.1.ff.net.2.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.2.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.2.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.3.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.3.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.4.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.4.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.5.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.5.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.6.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.6.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.7.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.7.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.8.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.8.ff.net.2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.norm1.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.norm1.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_k.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_v.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.norm2.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.norm2.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_k.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_v.weight"> : tensor<1280x2048xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight"> : tensor<1280x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.norm3.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.norm3.weight"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.norm3.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.norm3.bias"> : tensor<1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight"> : tensor<10240x1280xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.bias"> : tensor<10240xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight"> : tensor<1280x5120xf16> | |
util.global private @_params.unet.mid_block.attentions.0.transformer_blocks.9.ff.net.2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.transformer_blocks.9.ff.net.2.bias"> : tensor<1280xf16> | |
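// The ten mid-block transformer_blocks above share one layout: attn1 is
// self-attention (to_q/to_k/to_v all tensor<1280x1280xf16>), attn2 is
// cross-attention whose to_k/to_v project from 2048-wide encoder hidden
// states (tensor<1280x2048xf16>), and the feed-forward pair (ff.net.0.proj at
// 10240x1280, ff.net.2 at 1280x5120) is consistent with a GEGLU whose
// 10240-wide projection splits into a 5120-dim gate and a 5120-dim value.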
util.global private @_params.unet.mid_block.attentions.0.proj_out.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.proj_out.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.attentions.0.proj_out.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.attentions.0.proj_out.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.0.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.0.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.0.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.conv1.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.mid_block.resnets.0.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.conv1.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.0.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.resnets.0.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.time_emb_proj.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.0.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.0.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.0.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.conv2.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.mid_block.resnets.0.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.0.conv2.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.1.norm1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.norm1.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.1.norm1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.norm1.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.1.conv1.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.conv1.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.mid_block.resnets.1.conv1.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.conv1.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.1.time_emb_proj.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @_params.unet.mid_block.resnets.1.time_emb_proj.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.time_emb_proj.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.1.norm2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.norm2.weight"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.1.norm2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.norm2.bias"> : tensor<1280xf16>
util.global private @_params.unet.mid_block.resnets.1.conv2.weight {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.conv2.weight"> : tensor<1280x1280x3x3xf16>
util.global private @_params.unet.mid_block.resnets.1.conv2.bias {noinline} = #stream.parameter.named<"model"::"unet.mid_block.resnets.1.conv2.bias"> : tensor<1280xf16>
util.global private @_params.unet.conv_norm_out.weight {noinline} = #stream.parameter.named<"model"::"unet.conv_norm_out.weight"> : tensor<320xf16>
util.global private @_params.unet.conv_norm_out.bias {noinline} = #stream.parameter.named<"model"::"unet.conv_norm_out.bias"> : tensor<320xf16>
util.global private @_params.unet.conv_out.weight {noinline} = #stream.parameter.named<"model"::"unet.conv_out.weight"> : tensor<4x320x3x3xf16>
util.global private @_params.unet.conv_out.bias {noinline} = #stream.parameter.named<"model"::"unet.conv_out.bias"> : tensor<4xf16>
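// End of the parameter globals. The rest of the module defines the exported
// entry points: @run_initialize and @run_forward are thin ABI wrappers that
// move builtin tensors into torch vtensors and delegate to the private
// @initialize and @forward functions; the torch.args_schema /
// torch.return_schema attributes record the original Python pytree layout.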
func.func @run_initialize(%arg0: tensor<1x4x128x128xf16>) -> (tensor<1x4x128x128xf16>, tensor<2x6xf16>, tensor<i64>) attributes {torch.args_schema = "[1, {\22type\22: \22builtins.tuple\22, \22context\22: \22null\22, \22children_spec\22: [{\22type\22: \22builtins.list\22, \22context\22: \22null\22, \22children_spec\22: [{\22type\22: null, \22context\22: null, \22children_spec\22: []}]}, {\22type\22: \22builtins.dict\22, \22context\22: \22[]\22, \22children_spec\22: []}]}]", torch.return_schema = "[1, {\22type\22: \22builtins.tuple\22, \22context\22: \22null\22, \22children_spec\22: [{\22type\22: null, \22context\22: null, \22children_spec\22: []}, {\22type\22: null, \22context\22: null, \22children_spec\22: []}, {\22type\22: null, \22context\22: null, \22children_spec\22: []}]}]"} {
  %0 = torch_c.from_builtin_tensor %arg0 : tensor<1x4x128x128xf16> -> !torch.vtensor<[1,4,128,128],f16>
  %1:3 = call @initialize(%0) : (!torch.vtensor<[1,4,128,128],f16>) -> (!torch.vtensor<[1,4,128,128],f16>, !torch.vtensor<[2,6],f16>, !torch.vtensor<[],si64>)
  %2 = torch_c.to_builtin_tensor %1#0 : !torch.vtensor<[1,4,128,128],f16> -> tensor<1x4x128x128xf16>
  %3 = torch_c.to_builtin_tensor %1#1 : !torch.vtensor<[2,6],f16> -> tensor<2x6xf16>
  %4 = torch_c.to_builtin_tensor %1#2 : !torch.vtensor<[],si64> -> tensor<i64>
  return %2, %3, %4 : tensor<1x4x128x128xf16>, tensor<2x6xf16>, tensor<i64>
}
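// @initialize builds the per-run state: a [1,6] i64 literal (presumably the
// SDXL add_time_ids; the dense_resource payload is elided in this dump) is
// concatenated with itself along dim 0 for the two classifier-free-guidance
// batches and cast to f16 (torch dtype code 5), the input latents pass
// through scaled by 1.0, and the scalar i64 literal 3 matches the length of
// the 3-entry timestep table used in @forward, so it plausibly reports the
// number of scheduled steps.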
func.func private @initialize(%arg0: !torch.vtensor<[1,4,128,128],f16>) -> (!torch.vtensor<[1,4,128,128],f16>, !torch.vtensor<[2,6],f16>, !torch.vtensor<[],si64>) {
  %0 = torch.vtensor.literal(dense_resource<torch_tensor_1_6_torch.int64> : tensor<1x6xsi64>) : !torch.vtensor<[1,6],si64>
  %none = torch.constant.none
  %1 = torch.aten.clone %0, %none : !torch.vtensor<[1,6],si64>, !torch.none -> !torch.vtensor<[1,6],si64>
  %2 = torch.prim.ListConstruct %1, %1 : (!torch.vtensor<[1,6],si64>, !torch.vtensor<[1,6],si64>) -> !torch.list<vtensor>
  %int0 = torch.constant.int 0
  %3 = torch.aten.cat %2, %int0 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2,6],si64>
  %int1 = torch.constant.int 1
  %int1_0 = torch.constant.int 1
  %4 = torch.prim.ListConstruct %int1, %int1_0 : (!torch.int, !torch.int) -> !torch.list<int>
  %5 = torch.aten.repeat %3, %4 : !torch.vtensor<[2,6],si64>, !torch.list<int> -> !torch.vtensor<[2,6],si64>
  %int5 = torch.constant.int 5
  %6 = torch.prims.convert_element_type %5, %int5 : !torch.vtensor<[2,6],si64>, !torch.int -> !torch.vtensor<[2,6],f16>
  %7 = torch.vtensor.literal(dense<3> : tensor<si64>) : !torch.vtensor<[],si64>
  %none_1 = torch.constant.none
  %8 = torch.aten.clone %7, %none_1 : !torch.vtensor<[],si64>, !torch.none -> !torch.vtensor<[],si64>
  %float1.000000e00 = torch.constant.float 1.000000e+00
  %9 = torch.aten.mul.Scalar %arg0, %float1.000000e00 : !torch.vtensor<[1,4,128,128],f16>, !torch.float -> !torch.vtensor<[1,4,128,128],f16>
  return %9, %6, %8 : !torch.vtensor<[1,4,128,128],f16>, !torch.vtensor<[2,6],f16>, !torch.vtensor<[],si64>
}
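// @run_forward wraps one scheduled denoising step. Its operands are the
// latents [1,4,128,128], encoder hidden states [2,64,2048], pooled text
// embeddings [2,1280], the [2,6] time IDs produced by @initialize, a scalar
// f16 (likely the guidance scale), and the current step index as [1]xi64.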
func.func @run_forward(%arg0: tensor<1x4x128x128xf16>, %arg1: tensor<2x64x2048xf16>, %arg2: tensor<2x1280xf16>, %arg3: tensor<2x6xf16>, %arg4: tensor<1xf16>, %arg5: tensor<1xi64>) -> tensor<1x4x128x128xf16> attributes {torch.args_schema = "[1, {\22type\22: \22builtins.tuple\22, \22context\22: \22null\22, \22children_spec\22: [{\22type\22: \22builtins.list\22, \22context\22: \22null\22, \22children_spec\22: [{\22type\22: null, \22context\22: null, \22children_spec\22: []}, {\22type\22: null, \22context\22: null, \22children_spec\22: []}, {\22type\22: null, \22context\22: null, \22children_spec\22: []}, {\22type\22: null, \22context\22: null, \22children_spec\22: []}, {\22type\22: null, \22context\22: null, \22children_spec\22: []}, {\22type\22: null, \22context\22: null, \22children_spec\22: []}]}, {\22type\22: \22builtins.dict\22, \22context\22: \22[]\22, \22children_spec\22: []}]}]", torch.return_schema = "[1, {\22type\22: null, \22context\22: null, \22children_spec\22: []}]"} {
  %0 = torch_c.from_builtin_tensor %arg0 : tensor<1x4x128x128xf16> -> !torch.vtensor<[1,4,128,128],f16>
  %1 = torch_c.from_builtin_tensor %arg1 : tensor<2x64x2048xf16> -> !torch.vtensor<[2,64,2048],f16>
  %2 = torch_c.from_builtin_tensor %arg2 : tensor<2x1280xf16> -> !torch.vtensor<[2,1280],f16>
  %3 = torch_c.from_builtin_tensor %arg3 : tensor<2x6xf16> -> !torch.vtensor<[2,6],f16>
  %4 = torch_c.from_builtin_tensor %arg4 : tensor<1xf16> -> !torch.vtensor<[1],f16>
  %5 = torch_c.from_builtin_tensor %arg5 : tensor<1xi64> -> !torch.vtensor<[1],si64>
  %6 = call @forward(%0, %1, %2, %3, %4, %5) : (!torch.vtensor<[1,4,128,128],f16>, !torch.vtensor<[2,64,2048],f16>, !torch.vtensor<[2,1280],f16>, !torch.vtensor<[2,6],f16>, !torch.vtensor<[1],f16>, !torch.vtensor<[1],si64>) -> !torch.vtensor<[1,4,128,128],f16>
  %7 = torch_c.to_builtin_tensor %6 : !torch.vtensor<[1,4,128,128],f16> -> tensor<1x4x128x128xf16>
  return %7 : tensor<1x4x128x128xf16>
}
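// @forward starts by indexing a 3-entry i64 timestep table (dense_resource
// payload elided) with the step index, concatenating the latents with
// themselves along dim 0 to form the two classifier-free-guidance batches,
// and expanding the selected timestep to that batch of 2.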
func.func private @forward(%arg0: !torch.vtensor<[1,4,128,128],f16>, %arg1: !torch.vtensor<[2,64,2048],f16>, %arg2: !torch.vtensor<[2,1280],f16>, %arg3: !torch.vtensor<[2,6],f16>, %arg4: !torch.vtensor<[1],f16>, %arg5: !torch.vtensor<[1],si64>) -> !torch.vtensor<[1,4,128,128],f16> {
  %0 = torch.vtensor.literal(dense_resource<torch_tensor_3_torch.int64> : tensor<3xsi64>) : !torch.vtensor<[3],si64>
  %1 = torch.prim.ListConstruct %arg5 : (!torch.vtensor<[1],si64>) -> !torch.list<optional<vtensor>>
  %2 = torch.aten.index.Tensor %0, %1 : !torch.vtensor<[3],si64>, !torch.list<optional<vtensor>> -> !torch.vtensor<[1],si64>
  %3 = torch.prim.ListConstruct %arg0, %arg0 : (!torch.vtensor<[1,4,128,128],f16>, !torch.vtensor<[1,4,128,128],f16>) -> !torch.list<vtensor>
  %int0 = torch.constant.int 0
  %4 = torch.aten.cat %3, %int0 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2,4,128,128],f16>
  %int2 = torch.constant.int 2
  %5 = torch.prim.ListConstruct %int2 : (!torch.int) -> !torch.list<int>
  %false = torch.constant.bool false
  %6 = torch.aten.expand %2, %5, %false : !torch.vtensor<[1],si64>, !torch.list<int>, !torch.bool -> !torch.vtensor<[2],si64>
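  // The block below computes the sinusoidal timestep embedding in f32:
  //   freqs = exp(arange(160) * (-9.2103403719761836) / 160)   // -9.21034... is -ln(10000)
  //   args  = timestep[:, None] * freqs[None, :]               // [2,160]
  //   emb   = cat(sin(args), cos(args), dim=-1)                // [2,320]
  // It then re-slices emb into cat(cos, sin) before casting to f16 (dtype
  // code 5), which is consistent with diffusers' Timesteps module with
  // flip_sin_to_cos=True.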
  %int0_0 = torch.constant.int 0
  %int160 = torch.constant.int 160
  %int6 = torch.constant.int 6
  %none = torch.constant.none
  %cpu = torch.constant.device "cpu"
  %false_1 = torch.constant.bool false
  %7 = torch.aten.arange.start %int0_0, %int160, %int6, %none, %cpu, %false_1 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[160],f32>
  %float-9.210340e00 = torch.constant.float -9.2103403719761836
  %8 = torch.aten.mul.Scalar %7, %float-9.210340e00 : !torch.vtensor<[160],f32>, !torch.float -> !torch.vtensor<[160],f32>
  %int160_2 = torch.constant.int 160
  %9 = torch.aten.div.Scalar %8, %int160_2 : !torch.vtensor<[160],f32>, !torch.int -> !torch.vtensor<[160],f32>
  %10 = torch.aten.exp %9 : !torch.vtensor<[160],f32> -> !torch.vtensor<[160],f32>
  %int0_3 = torch.constant.int 0
  %int0_4 = torch.constant.int 0
  %int9223372036854775807 = torch.constant.int 9223372036854775807
  %int1 = torch.constant.int 1
  %11 = torch.aten.slice.Tensor %6, %int0_3, %int0_4, %int9223372036854775807, %int1 : !torch.vtensor<[2],si64>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2],si64>
  %int1_5 = torch.constant.int 1
  %12 = torch.aten.unsqueeze %11, %int1_5 : !torch.vtensor<[2],si64>, !torch.int -> !torch.vtensor<[2,1],si64>
  %int6_6 = torch.constant.int 6
  %13 = torch.prims.convert_element_type %12, %int6_6 : !torch.vtensor<[2,1],si64>, !torch.int -> !torch.vtensor<[2,1],f32>
  %int0_7 = torch.constant.int 0
  %14 = torch.aten.unsqueeze %10, %int0_7 : !torch.vtensor<[160],f32>, !torch.int -> !torch.vtensor<[1,160],f32>
  %int1_8 = torch.constant.int 1
  %int0_9 = torch.constant.int 0
  %int9223372036854775807_10 = torch.constant.int 9223372036854775807
  %int1_11 = torch.constant.int 1
  %15 = torch.aten.slice.Tensor %14, %int1_8, %int0_9, %int9223372036854775807_10, %int1_11 : !torch.vtensor<[1,160],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,160],f32>
  %16 = torch.aten.mul.Tensor %13, %15 : !torch.vtensor<[2,1],f32>, !torch.vtensor<[1,160],f32> -> !torch.vtensor<[2,160],f32>
  %int1_12 = torch.constant.int 1
  %17 = torch.aten.mul.Scalar %16, %int1_12 : !torch.vtensor<[2,160],f32>, !torch.int -> !torch.vtensor<[2,160],f32>
  %18 = torch.aten.sin %17 : !torch.vtensor<[2,160],f32> -> !torch.vtensor<[2,160],f32>
  %19 = torch.aten.cos %17 : !torch.vtensor<[2,160],f32> -> !torch.vtensor<[2,160],f32>
  %20 = torch.prim.ListConstruct %18, %19 : (!torch.vtensor<[2,160],f32>, !torch.vtensor<[2,160],f32>) -> !torch.list<vtensor>
  %int-1 = torch.constant.int -1
  %21 = torch.aten.cat %20, %int-1 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2,320],f32>
  %int0_13 = torch.constant.int 0
  %int0_14 = torch.constant.int 0
  %int9223372036854775807_15 = torch.constant.int 9223372036854775807
  %int1_16 = torch.constant.int 1
  %22 = torch.aten.slice.Tensor %21, %int0_13, %int0_14, %int9223372036854775807_15, %int1_16 : !torch.vtensor<[2,320],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,320],f32>
  %int1_17 = torch.constant.int 1
  %int160_18 = torch.constant.int 160
  %int9223372036854775807_19 = torch.constant.int 9223372036854775807
  %int1_20 = torch.constant.int 1
  %23 = torch.aten.slice.Tensor %22, %int1_17, %int160_18, %int9223372036854775807_19, %int1_20 : !torch.vtensor<[2,320],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,160],f32>
  %int0_21 = torch.constant.int 0
  %int0_22 = torch.constant.int 0
  %int9223372036854775807_23 = torch.constant.int 9223372036854775807
  %int1_24 = torch.constant.int 1
  %24 = torch.aten.slice.Tensor %21, %int0_21, %int0_22, %int9223372036854775807_23, %int1_24 : !torch.vtensor<[2,320],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,320],f32>
  %int1_25 = torch.constant.int 1
  %int0_26 = torch.constant.int 0
  %int160_27 = torch.constant.int 160
  %int1_28 = torch.constant.int 1
  %25 = torch.aten.slice.Tensor %24, %int1_25, %int0_26, %int160_27, %int1_28 : !torch.vtensor<[2,320],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,160],f32>
  %26 = torch.prim.ListConstruct %23, %25 : (!torch.vtensor<[2,160],f32>, !torch.vtensor<[2,160],f32>) -> !torch.list<vtensor>
  %int-1_29 = torch.constant.int -1
  %27 = torch.aten.cat %26, %int-1_29 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2,320],f32>
  %int5 = torch.constant.int 5
  %28 = torch.prims.convert_element_type %27, %int5 : !torch.vtensor<[2,320],f32>, !torch.int -> !torch.vtensor<[2,320],f16>
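  // Time-embedding MLP: linear_1 (320 -> 1280), SiLU, linear_2 (1280 -> 1280).
  // The f16 weights and activations are upcast to f32 (dtype code 6) for each
  // mm and bias add, then the result is cast back to f16; the mul.Scalar-by-1
  // ops are evidently the alpha/beta scaling left over from the addmm
  // decomposition.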
  %_params.unet.time_embedding.linear_1.weight = util.global.load @_params.unet.time_embedding.linear_1.weight : tensor<1280x320xf16>
  %29 = torch_c.from_builtin_tensor %_params.unet.time_embedding.linear_1.weight : tensor<1280x320xf16> -> !torch.vtensor<[1280,320],f16>
  %int0_30 = torch.constant.int 0
  %int1_31 = torch.constant.int 1
  %30 = torch.aten.transpose.int %29, %int0_30, %int1_31 : !torch.vtensor<[1280,320],f16>, !torch.int, !torch.int -> !torch.vtensor<[320,1280],f16>
  %_params.unet.time_embedding.linear_1.bias = util.global.load @_params.unet.time_embedding.linear_1.bias : tensor<1280xf16>
  %31 = torch_c.from_builtin_tensor %_params.unet.time_embedding.linear_1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
  %int6_32 = torch.constant.int 6
  %32 = torch.prims.convert_element_type %31, %int6_32 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
  %int6_33 = torch.constant.int 6
  %33 = torch.prims.convert_element_type %28, %int6_33 : !torch.vtensor<[2,320],f16>, !torch.int -> !torch.vtensor<[2,320],f32>
  %int6_34 = torch.constant.int 6
  %34 = torch.prims.convert_element_type %30, %int6_34 : !torch.vtensor<[320,1280],f16>, !torch.int -> !torch.vtensor<[320,1280],f32>
  %35 = torch.aten.mm %33, %34 : !torch.vtensor<[2,320],f32>, !torch.vtensor<[320,1280],f32> -> !torch.vtensor<[2,1280],f32>
  %int1_35 = torch.constant.int 1
  %36 = torch.aten.mul.Scalar %35, %int1_35 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32>
  %int1_36 = torch.constant.int 1
  %37 = torch.aten.mul.Scalar %32, %int1_36 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
  %int1_37 = torch.constant.int 1
  %38 = torch.aten.add.Tensor %36, %37, %int1_37 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32>
  %int5_38 = torch.constant.int 5
  %39 = torch.prims.convert_element_type %38, %int5_38 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f16>
  %40 = torch.aten.silu %39 : !torch.vtensor<[2,1280],f16> -> !torch.vtensor<[2,1280],f16>
  %_params.unet.time_embedding.linear_2.weight = util.global.load @_params.unet.time_embedding.linear_2.weight : tensor<1280x1280xf16>
  %41 = torch_c.from_builtin_tensor %_params.unet.time_embedding.linear_2.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
  %int0_39 = torch.constant.int 0
  %int1_40 = torch.constant.int 1
  %42 = torch.aten.transpose.int %41, %int0_39, %int1_40 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
  %_params.unet.time_embedding.linear_2.bias = util.global.load @_params.unet.time_embedding.linear_2.bias : tensor<1280xf16>
  %43 = torch_c.from_builtin_tensor %_params.unet.time_embedding.linear_2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
  %int6_41 = torch.constant.int 6
  %44 = torch.prims.convert_element_type %43, %int6_41 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
  %int6_42 = torch.constant.int 6
  %45 = torch.prims.convert_element_type %40, %int6_42 : !torch.vtensor<[2,1280],f16>, !torch.int -> !torch.vtensor<[2,1280],f32>
  %int6_43 = torch.constant.int 6
  %46 = torch.prims.convert_element_type %42, %int6_43 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32>
  %47 = torch.aten.mm %45, %46 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2,1280],f32>
  %int1_44 = torch.constant.int 1
  %48 = torch.aten.mul.Scalar %47, %int1_44 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32>
  %int1_45 = torch.constant.int 1
  %49 = torch.aten.mul.Scalar %44, %int1_45 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_46 = torch.constant.int 1 | |
%50 = torch.aten.add.Tensor %48, %49, %int1_46 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int5_47 = torch.constant.int 5 | |
%51 = torch.prims.convert_element_type %50, %int5_47 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f16> | |
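// --- Additional conditioning: sinusoidal embedding of the time_ids. ---
// %arg3 ([2,6]; by shape and use this looks like SDXL's micro-conditioning time_ids) is flattened to
// [12]; frequencies exp(arange(128) * -ln(10000) / 128) give each entry a 256-dim sin|cos embedding
// (halves swapped as above), yielding [12,256], which is reshaped to [2,1536] and concatenated with
// the pooled text embedding %arg2 ([2,1280]) into the [2,2816] input of unet.add_embedding.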
%int12 = torch.constant.int 12 | |
%52 = torch.prim.ListConstruct %int12 : (!torch.int) -> !torch.list<int> | |
%53 = torch.aten.view %arg3, %52 : !torch.vtensor<[2,6],f16>, !torch.list<int> -> !torch.vtensor<[12],f16> | |
%int0_48 = torch.constant.int 0 | |
%int128 = torch.constant.int 128 | |
%int6_49 = torch.constant.int 6 | |
%none_50 = torch.constant.none | |
%cpu_51 = torch.constant.device "cpu" | |
%false_52 = torch.constant.bool false | |
%54 = torch.aten.arange.start %int0_48, %int128, %int6_49, %none_50, %cpu_51, %false_52 : !torch.int, !torch.int, !torch.int, !torch.none, !torch.Device, !torch.bool -> !torch.vtensor<[128],f32> | |
%float-9.210340e00_53 = torch.constant.float -9.2103403719761836 | |
%55 = torch.aten.mul.Scalar %54, %float-9.210340e00_53 : !torch.vtensor<[128],f32>, !torch.float -> !torch.vtensor<[128],f32> | |
%int128_54 = torch.constant.int 128 | |
%56 = torch.aten.div.Scalar %55, %int128_54 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[128],f32> | |
%57 = torch.aten.exp %56 : !torch.vtensor<[128],f32> -> !torch.vtensor<[128],f32> | |
%int0_55 = torch.constant.int 0 | |
%int0_56 = torch.constant.int 0 | |
%int9223372036854775807_57 = torch.constant.int 9223372036854775807 | |
%int1_58 = torch.constant.int 1 | |
%58 = torch.aten.slice.Tensor %53, %int0_55, %int0_56, %int9223372036854775807_57, %int1_58 : !torch.vtensor<[12],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[12],f16> | |
%int1_59 = torch.constant.int 1 | |
%59 = torch.aten.unsqueeze %58, %int1_59 : !torch.vtensor<[12],f16>, !torch.int -> !torch.vtensor<[12,1],f16> | |
%int6_60 = torch.constant.int 6 | |
%60 = torch.prims.convert_element_type %59, %int6_60 : !torch.vtensor<[12,1],f16>, !torch.int -> !torch.vtensor<[12,1],f32> | |
%int0_61 = torch.constant.int 0 | |
%61 = torch.aten.unsqueeze %57, %int0_61 : !torch.vtensor<[128],f32>, !torch.int -> !torch.vtensor<[1,128],f32> | |
%int1_62 = torch.constant.int 1 | |
%int0_63 = torch.constant.int 0 | |
%int9223372036854775807_64 = torch.constant.int 9223372036854775807 | |
%int1_65 = torch.constant.int 1 | |
%62 = torch.aten.slice.Tensor %61, %int1_62, %int0_63, %int9223372036854775807_64, %int1_65 : !torch.vtensor<[1,128],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[1,128],f32> | |
%63 = torch.aten.mul.Tensor %60, %62 : !torch.vtensor<[12,1],f32>, !torch.vtensor<[1,128],f32> -> !torch.vtensor<[12,128],f32> | |
%int1_66 = torch.constant.int 1 | |
%64 = torch.aten.mul.Scalar %63, %int1_66 : !torch.vtensor<[12,128],f32>, !torch.int -> !torch.vtensor<[12,128],f32> | |
%65 = torch.aten.sin %64 : !torch.vtensor<[12,128],f32> -> !torch.vtensor<[12,128],f32> | |
%66 = torch.aten.cos %64 : !torch.vtensor<[12,128],f32> -> !torch.vtensor<[12,128],f32> | |
%67 = torch.prim.ListConstruct %65, %66 : (!torch.vtensor<[12,128],f32>, !torch.vtensor<[12,128],f32>) -> !torch.list<vtensor> | |
%int-1_67 = torch.constant.int -1 | |
%68 = torch.aten.cat %67, %int-1_67 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[12,256],f32> | |
%int0_68 = torch.constant.int 0 | |
%int0_69 = torch.constant.int 0 | |
%int9223372036854775807_70 = torch.constant.int 9223372036854775807 | |
%int1_71 = torch.constant.int 1 | |
%69 = torch.aten.slice.Tensor %68, %int0_68, %int0_69, %int9223372036854775807_70, %int1_71 : !torch.vtensor<[12,256],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[12,256],f32> | |
%int1_72 = torch.constant.int 1 | |
%int128_73 = torch.constant.int 128 | |
%int9223372036854775807_74 = torch.constant.int 9223372036854775807 | |
%int1_75 = torch.constant.int 1 | |
%70 = torch.aten.slice.Tensor %69, %int1_72, %int128_73, %int9223372036854775807_74, %int1_75 : !torch.vtensor<[12,256],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[12,128],f32> | |
%int0_76 = torch.constant.int 0 | |
%int0_77 = torch.constant.int 0 | |
%int9223372036854775807_78 = torch.constant.int 9223372036854775807 | |
%int1_79 = torch.constant.int 1 | |
%71 = torch.aten.slice.Tensor %68, %int0_76, %int0_77, %int9223372036854775807_78, %int1_79 : !torch.vtensor<[12,256],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[12,256],f32> | |
%int1_80 = torch.constant.int 1 | |
%int0_81 = torch.constant.int 0 | |
%int128_82 = torch.constant.int 128 | |
%int1_83 = torch.constant.int 1 | |
%72 = torch.aten.slice.Tensor %71, %int1_80, %int0_81, %int128_82, %int1_83 : !torch.vtensor<[12,256],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[12,128],f32> | |
%73 = torch.prim.ListConstruct %70, %72 : (!torch.vtensor<[12,128],f32>, !torch.vtensor<[12,128],f32>) -> !torch.list<vtensor> | |
%int-1_84 = torch.constant.int -1 | |
%74 = torch.aten.cat %73, %int-1_84 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[12,256],f32> | |
%int2_85 = torch.constant.int 2 | |
%int-1_86 = torch.constant.int -1 | |
%75 = torch.prim.ListConstruct %int2_85, %int-1_86 : (!torch.int, !torch.int) -> !torch.list<int> | |
%76 = torch.aten.view %74, %75 : !torch.vtensor<[12,256],f32>, !torch.list<int> -> !torch.vtensor<[2,1536],f32> | |
%77 = torch.prim.ListConstruct %arg2, %76 : (!torch.vtensor<[2,1280],f16>, !torch.vtensor<[2,1536],f32>) -> !torch.list<vtensor> | |
%int-1_87 = torch.constant.int -1 | |
%78 = torch.aten.cat %77, %int-1_87 : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[2,2816],f32> | |
%int5_88 = torch.constant.int 5 | |
%79 = torch.prims.convert_element_type %78, %int5_88 : !torch.vtensor<[2,2816],f32>, !torch.int -> !torch.vtensor<[2,2816],f16> | |
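// unet.add_embedding MLP: linear_1 (2816 -> 1280), SiLU, linear_2 (1280 -> 1280), using the same
// f32-compute / f16-store pattern as the time-embedding MLP above.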
%_params.unet.add_embedding.linear_1.weight = util.global.load @_params.unet.add_embedding.linear_1.weight : tensor<1280x2816xf16> | |
%80 = torch_c.from_builtin_tensor %_params.unet.add_embedding.linear_1.weight : tensor<1280x2816xf16> -> !torch.vtensor<[1280,2816],f16> | |
%int0_89 = torch.constant.int 0 | |
%int1_90 = torch.constant.int 1 | |
%81 = torch.aten.transpose.int %80, %int0_89, %int1_90 : !torch.vtensor<[1280,2816],f16>, !torch.int, !torch.int -> !torch.vtensor<[2816,1280],f16> | |
%_params.unet.add_embedding.linear_1.bias = util.global.load @_params.unet.add_embedding.linear_1.bias : tensor<1280xf16> | |
%82 = torch_c.from_builtin_tensor %_params.unet.add_embedding.linear_1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_91 = torch.constant.int 6 | |
%83 = torch.prims.convert_element_type %82, %int6_91 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_92 = torch.constant.int 6 | |
%84 = torch.prims.convert_element_type %79, %int6_92 : !torch.vtensor<[2,2816],f16>, !torch.int -> !torch.vtensor<[2,2816],f32> | |
%int6_93 = torch.constant.int 6 | |
%85 = torch.prims.convert_element_type %81, %int6_93 : !torch.vtensor<[2816,1280],f16>, !torch.int -> !torch.vtensor<[2816,1280],f32> | |
%86 = torch.aten.mm %84, %85 : !torch.vtensor<[2,2816],f32>, !torch.vtensor<[2816,1280],f32> -> !torch.vtensor<[2,1280],f32> | |
%int1_94 = torch.constant.int 1 | |
%87 = torch.aten.mul.Scalar %86, %int1_94 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int1_95 = torch.constant.int 1 | |
%88 = torch.aten.mul.Scalar %83, %int1_95 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_96 = torch.constant.int 1 | |
%89 = torch.aten.add.Tensor %87, %88, %int1_96 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int5_97 = torch.constant.int 5 | |
%90 = torch.prims.convert_element_type %89, %int5_97 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f16> | |
%91 = torch.aten.silu %90 : !torch.vtensor<[2,1280],f16> -> !torch.vtensor<[2,1280],f16> | |
%_params.unet.add_embedding.linear_2.weight = util.global.load @_params.unet.add_embedding.linear_2.weight : tensor<1280x1280xf16> | |
%92 = torch_c.from_builtin_tensor %_params.unet.add_embedding.linear_2.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_98 = torch.constant.int 0 | |
%int1_99 = torch.constant.int 1 | |
%93 = torch.aten.transpose.int %92, %int0_98, %int1_99 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%_params.unet.add_embedding.linear_2.bias = util.global.load @_params.unet.add_embedding.linear_2.bias : tensor<1280xf16> | |
%94 = torch_c.from_builtin_tensor %_params.unet.add_embedding.linear_2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_100 = torch.constant.int 6 | |
%95 = torch.prims.convert_element_type %94, %int6_100 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_101 = torch.constant.int 6 | |
%96 = torch.prims.convert_element_type %91, %int6_101 : !torch.vtensor<[2,1280],f16>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int6_102 = torch.constant.int 6 | |
%97 = torch.prims.convert_element_type %93, %int6_102 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32> | |
%98 = torch.aten.mm %96, %97 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2,1280],f32> | |
%int1_103 = torch.constant.int 1 | |
%99 = torch.aten.mul.Scalar %98, %int1_103 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int1_104 = torch.constant.int 1 | |
%100 = torch.aten.mul.Scalar %95, %int1_104 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_105 = torch.constant.int 1 | |
%101 = torch.aten.add.Tensor %99, %100, %int1_105 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int5_106 = torch.constant.int 5 | |
%102 = torch.prims.convert_element_type %101, %int5_106 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f16> | |
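// The conditioning vector %103 below is the sum of the timestep embedding (%51) and the
// additional-condition embedding (%102); it feeds each resnet's time_emb_proj.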
%int1_107 = torch.constant.int 1 | |
%103 = torch.aten.add.Tensor %51, %102, %int1_107 : !torch.vtensor<[2,1280],f16>, !torch.vtensor<[2,1280],f16>, !torch.int -> !torch.vtensor<[2,1280],f16> | |
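// unet.conv_in: 3x3 convolution, stride 1, padding 1, mapping the [2,4,128,128] latent %4
// to 320 channels.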
%_params.unet.conv_in.weight = util.global.load @_params.unet.conv_in.weight : tensor<320x4x3x3xf16> | |
%104 = torch_c.from_builtin_tensor %_params.unet.conv_in.weight : tensor<320x4x3x3xf16> -> !torch.vtensor<[320,4,3,3],f16> | |
%_params.unet.conv_in.bias = util.global.load @_params.unet.conv_in.bias : tensor<320xf16> | |
%105 = torch_c.from_builtin_tensor %_params.unet.conv_in.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int1_108 = torch.constant.int 1 | |
%int1_109 = torch.constant.int 1 | |
%106 = torch.prim.ListConstruct %int1_108, %int1_109 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_110 = torch.constant.int 1 | |
%int1_111 = torch.constant.int 1 | |
%107 = torch.prim.ListConstruct %int1_110, %int1_111 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_112 = torch.constant.int 1 | |
%int1_113 = torch.constant.int 1 | |
%108 = torch.prim.ListConstruct %int1_112, %int1_113 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_114 = torch.constant.bool false | |
%int0_115 = torch.constant.int 0 | |
%int0_116 = torch.constant.int 0 | |
%109 = torch.prim.ListConstruct %int0_115, %int0_116 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_117 = torch.constant.int 1 | |
%110 = torch.aten.convolution %4, %104, %105, %106, %107, %108, %false_114, %109, %int1_117 : !torch.vtensor<[2,4,128,128],f16>, !torch.vtensor<[320,4,3,3],f16>, !torch.vtensor<[320],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
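// --- down_blocks.0.resnets.0, norm1: GroupNorm decomposed into primitive ops. ---
// The [2,320,128,128] activation is viewed as [2,32,10,16384] (32 groups x 10 channels), var/mean are
// taken over dims {2,3}, and the result is normalized with rsqrt(var + 1e-5), viewed back, and given
// the per-channel affine from norm1.weight/bias. The squeeze/detach chain that follows appears to
// materialize native_group_norm's saved mean/rstd outputs, which are otherwise unused here.
// GroupNorm is followed by SiLU and conv1 (3x3, 320 -> 320).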
%int2_118 = torch.constant.int 2 | |
%int32 = torch.constant.int 32 | |
%int10 = torch.constant.int 10 | |
%int16384 = torch.constant.int 16384 | |
%111 = torch.prim.ListConstruct %int2_118, %int32, %int10, %int16384 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%112 = torch.aten.view %110, %111 : !torch.vtensor<[2,320,128,128],f16>, !torch.list<int> -> !torch.vtensor<[2,32,10,16384],f16> | |
%int6_119 = torch.constant.int 6 | |
%113 = torch.prims.convert_element_type %112, %int6_119 : !torch.vtensor<[2,32,10,16384],f16>, !torch.int -> !torch.vtensor<[2,32,10,16384],f32> | |
%int2_120 = torch.constant.int 2 | |
%int3 = torch.constant.int 3 | |
%114 = torch.prim.ListConstruct %int2_120, %int3 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_121 = torch.constant.int 0 | |
%true = torch.constant.bool true | |
%result0, %result1 = torch.aten.var_mean.correction %113, %114, %int0_121, %true : !torch.vtensor<[2,32,10,16384],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float1.000000e-05 = torch.constant.float 1.000000e-05 | |
%int1_122 = torch.constant.int 1 | |
%115 = torch.aten.add.Scalar %result0, %float1.000000e-05, %int1_122 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%116 = torch.aten.rsqrt %115 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_123 = torch.constant.int 1 | |
%117 = torch.aten.sub.Tensor %112, %result1, %int1_123 : !torch.vtensor<[2,32,10,16384],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,10,16384],f32> | |
%118 = torch.aten.mul.Tensor %117, %116 : !torch.vtensor<[2,32,10,16384],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,10,16384],f32> | |
%int2_124 = torch.constant.int 2 | |
%int320 = torch.constant.int 320 | |
%int128_125 = torch.constant.int 128 | |
%int128_126 = torch.constant.int 128 | |
%119 = torch.prim.ListConstruct %int2_124, %int320, %int128_125, %int128_126 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%120 = torch.aten.view %118, %119 : !torch.vtensor<[2,32,10,16384],f32>, !torch.list<int> -> !torch.vtensor<[2,320,128,128],f32> | |
%_params.unet.down_blocks.0.resnets.0.norm1.bias = util.global.load @_params.unet.down_blocks.0.resnets.0.norm1.bias : tensor<320xf16> | |
%121 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.norm1.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_127 = torch.constant.int 0 | |
%122 = torch.aten.unsqueeze %121, %int0_127 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_128 = torch.constant.int 2 | |
%123 = torch.aten.unsqueeze %122, %int2_128 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_129 = torch.constant.int 3 | |
%124 = torch.aten.unsqueeze %123, %int3_129 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%_params.unet.down_blocks.0.resnets.0.norm1.weight = util.global.load @_params.unet.down_blocks.0.resnets.0.norm1.weight : tensor<320xf16> | |
%125 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.norm1.weight : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_130 = torch.constant.int 0 | |
%126 = torch.aten.unsqueeze %125, %int0_130 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_131 = torch.constant.int 2 | |
%127 = torch.aten.unsqueeze %126, %int2_131 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_132 = torch.constant.int 3 | |
%128 = torch.aten.unsqueeze %127, %int3_132 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%129 = torch.aten.mul.Tensor %120, %128 : !torch.vtensor<[2,320,128,128],f32>, !torch.vtensor<[1,320,1,1],f16> -> !torch.vtensor<[2,320,128,128],f32> | |
%int1_133 = torch.constant.int 1 | |
%130 = torch.aten.add.Tensor %129, %124, %int1_133 : !torch.vtensor<[2,320,128,128],f32>, !torch.vtensor<[1,320,1,1],f16>, !torch.int -> !torch.vtensor<[2,320,128,128],f32> | |
%int5_134 = torch.constant.int 5 | |
%131 = torch.prims.convert_element_type %130, %int5_134 : !torch.vtensor<[2,320,128,128],f32>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%int5_135 = torch.constant.int 5 | |
%132 = torch.prims.convert_element_type %result1, %int5_135 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_136 = torch.constant.int 5 | |
%133 = torch.prims.convert_element_type %116, %int5_136 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_137 = torch.constant.int 3 | |
%134 = torch.prim.ListConstruct %int3_137 : (!torch.int) -> !torch.list<int> | |
%135 = torch.prims.squeeze %132, %134 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_138 = torch.constant.int 2 | |
%136 = torch.prim.ListConstruct %int2_138 : (!torch.int) -> !torch.list<int> | |
%137 = torch.prims.squeeze %135, %136 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_139 = torch.constant.int 3 | |
%138 = torch.prim.ListConstruct %int3_139 : (!torch.int) -> !torch.list<int> | |
%139 = torch.prims.squeeze %133, %138 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_140 = torch.constant.int 2 | |
%140 = torch.prim.ListConstruct %int2_140 : (!torch.int) -> !torch.list<int> | |
%141 = torch.prims.squeeze %139, %140 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%142 = torch.aten.detach %137 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%143 = torch.aten.detach %141 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%144 = torch.aten.silu %131 : !torch.vtensor<[2,320,128,128],f16> -> !torch.vtensor<[2,320,128,128],f16> | |
%_params.unet.down_blocks.0.resnets.0.conv1.weight = util.global.load @_params.unet.down_blocks.0.resnets.0.conv1.weight : tensor<320x320x3x3xf16> | |
%145 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.conv1.weight : tensor<320x320x3x3xf16> -> !torch.vtensor<[320,320,3,3],f16> | |
%_params.unet.down_blocks.0.resnets.0.conv1.bias = util.global.load @_params.unet.down_blocks.0.resnets.0.conv1.bias : tensor<320xf16> | |
%146 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.conv1.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int1_141 = torch.constant.int 1 | |
%int1_142 = torch.constant.int 1 | |
%147 = torch.prim.ListConstruct %int1_141, %int1_142 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_143 = torch.constant.int 1 | |
%int1_144 = torch.constant.int 1 | |
%148 = torch.prim.ListConstruct %int1_143, %int1_144 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_145 = torch.constant.int 1 | |
%int1_146 = torch.constant.int 1 | |
%149 = torch.prim.ListConstruct %int1_145, %int1_146 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_147 = torch.constant.bool false | |
%int0_148 = torch.constant.int 0 | |
%int0_149 = torch.constant.int 0 | |
%150 = torch.prim.ListConstruct %int0_148, %int0_149 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_150 = torch.constant.int 1 | |
%151 = torch.aten.convolution %144, %145, %146, %147, %148, %149, %false_147, %150, %int1_150 : !torch.vtensor<[2,320,128,128],f16>, !torch.vtensor<[320,320,3,3],f16>, !torch.vtensor<[320],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
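// time_emb_proj: SiLU(%103) through a 1280 -> 320 linear, then unsqueezed to [2,320,1,1] and
// broadcast-added to the conv1 output.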
%152 = torch.aten.silu %103 : !torch.vtensor<[2,1280],f16> -> !torch.vtensor<[2,1280],f16> | |
%_params.unet.down_blocks.0.resnets.0.time_emb_proj.weight = util.global.load @_params.unet.down_blocks.0.resnets.0.time_emb_proj.weight : tensor<320x1280xf16> | |
%153 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.time_emb_proj.weight : tensor<320x1280xf16> -> !torch.vtensor<[320,1280],f16> | |
%int0_151 = torch.constant.int 0 | |
%int1_152 = torch.constant.int 1 | |
%154 = torch.aten.transpose.int %153, %int0_151, %int1_152 : !torch.vtensor<[320,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,320],f16> | |
%_params.unet.down_blocks.0.resnets.0.time_emb_proj.bias = util.global.load @_params.unet.down_blocks.0.resnets.0.time_emb_proj.bias : tensor<320xf16> | |
%155 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.time_emb_proj.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int6_153 = torch.constant.int 6 | |
%156 = torch.prims.convert_element_type %155, %int6_153 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[320],f32> | |
%int6_154 = torch.constant.int 6 | |
%157 = torch.prims.convert_element_type %152, %int6_154 : !torch.vtensor<[2,1280],f16>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int6_155 = torch.constant.int 6 | |
%158 = torch.prims.convert_element_type %154, %int6_155 : !torch.vtensor<[1280,320],f16>, !torch.int -> !torch.vtensor<[1280,320],f32> | |
%159 = torch.aten.mm %157, %158 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280,320],f32> -> !torch.vtensor<[2,320],f32> | |
%int1_156 = torch.constant.int 1 | |
%160 = torch.aten.mul.Scalar %159, %int1_156 : !torch.vtensor<[2,320],f32>, !torch.int -> !torch.vtensor<[2,320],f32> | |
%int1_157 = torch.constant.int 1 | |
%161 = torch.aten.mul.Scalar %156, %int1_157 : !torch.vtensor<[320],f32>, !torch.int -> !torch.vtensor<[320],f32> | |
%int1_158 = torch.constant.int 1 | |
%162 = torch.aten.add.Tensor %160, %161, %int1_158 : !torch.vtensor<[2,320],f32>, !torch.vtensor<[320],f32>, !torch.int -> !torch.vtensor<[2,320],f32> | |
%int5_159 = torch.constant.int 5 | |
%163 = torch.prims.convert_element_type %162, %int5_159 : !torch.vtensor<[2,320],f32>, !torch.int -> !torch.vtensor<[2,320],f16> | |
%int0_160 = torch.constant.int 0 | |
%int0_161 = torch.constant.int 0 | |
%int9223372036854775807_162 = torch.constant.int 9223372036854775807 | |
%int1_163 = torch.constant.int 1 | |
%164 = torch.aten.slice.Tensor %163, %int0_160, %int0_161, %int9223372036854775807_162, %int1_163 : !torch.vtensor<[2,320],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,320],f16> | |
%int1_164 = torch.constant.int 1 | |
%int0_165 = torch.constant.int 0 | |
%int9223372036854775807_166 = torch.constant.int 9223372036854775807 | |
%int1_167 = torch.constant.int 1 | |
%165 = torch.aten.slice.Tensor %164, %int1_164, %int0_165, %int9223372036854775807_166, %int1_167 : !torch.vtensor<[2,320],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,320],f16> | |
%int2_168 = torch.constant.int 2 | |
%166 = torch.aten.unsqueeze %165, %int2_168 : !torch.vtensor<[2,320],f16>, !torch.int -> !torch.vtensor<[2,320,1],f16> | |
%int3_169 = torch.constant.int 3 | |
%167 = torch.aten.unsqueeze %166, %int3_169 : !torch.vtensor<[2,320,1],f16>, !torch.int -> !torch.vtensor<[2,320,1,1],f16> | |
%int1_170 = torch.constant.int 1 | |
%168 = torch.aten.add.Tensor %151, %167, %int1_170 : !torch.vtensor<[2,320,128,128],f16>, !torch.vtensor<[2,320,1,1],f16>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
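// norm2 repeats the GroupNorm pattern above with the norm2 parameters, followed by SiLU, conv2
// (3x3, 320 -> 320), and the residual add with the block input %110.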
%int2_171 = torch.constant.int 2 | |
%int32_172 = torch.constant.int 32 | |
%int10_173 = torch.constant.int 10 | |
%int16384_174 = torch.constant.int 16384 | |
%169 = torch.prim.ListConstruct %int2_171, %int32_172, %int10_173, %int16384_174 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%170 = torch.aten.view %168, %169 : !torch.vtensor<[2,320,128,128],f16>, !torch.list<int> -> !torch.vtensor<[2,32,10,16384],f16> | |
%int6_175 = torch.constant.int 6 | |
%171 = torch.prims.convert_element_type %170, %int6_175 : !torch.vtensor<[2,32,10,16384],f16>, !torch.int -> !torch.vtensor<[2,32,10,16384],f32> | |
%int2_176 = torch.constant.int 2 | |
%int3_177 = torch.constant.int 3 | |
%172 = torch.prim.ListConstruct %int2_176, %int3_177 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_178 = torch.constant.int 0 | |
%true_179 = torch.constant.bool true | |
%result0_180, %result1_181 = torch.aten.var_mean.correction %171, %172, %int0_178, %true_179 : !torch.vtensor<[2,32,10,16384],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float1.000000e-05_182 = torch.constant.float 1.000000e-05 | |
%int1_183 = torch.constant.int 1 | |
%173 = torch.aten.add.Scalar %result0_180, %float1.000000e-05_182, %int1_183 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%174 = torch.aten.rsqrt %173 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_184 = torch.constant.int 1 | |
%175 = torch.aten.sub.Tensor %170, %result1_181, %int1_184 : !torch.vtensor<[2,32,10,16384],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,10,16384],f32> | |
%176 = torch.aten.mul.Tensor %175, %174 : !torch.vtensor<[2,32,10,16384],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,10,16384],f32> | |
%int2_185 = torch.constant.int 2 | |
%int320_186 = torch.constant.int 320 | |
%int128_187 = torch.constant.int 128 | |
%int128_188 = torch.constant.int 128 | |
%177 = torch.prim.ListConstruct %int2_185, %int320_186, %int128_187, %int128_188 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%178 = torch.aten.view %176, %177 : !torch.vtensor<[2,32,10,16384],f32>, !torch.list<int> -> !torch.vtensor<[2,320,128,128],f32> | |
%_params.unet.down_blocks.0.resnets.0.norm2.bias = util.global.load @_params.unet.down_blocks.0.resnets.0.norm2.bias : tensor<320xf16> | |
%179 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.norm2.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_189 = torch.constant.int 0 | |
%180 = torch.aten.unsqueeze %179, %int0_189 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_190 = torch.constant.int 2 | |
%181 = torch.aten.unsqueeze %180, %int2_190 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_191 = torch.constant.int 3 | |
%182 = torch.aten.unsqueeze %181, %int3_191 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%_params.unet.down_blocks.0.resnets.0.norm2.weight = util.global.load @_params.unet.down_blocks.0.resnets.0.norm2.weight : tensor<320xf16> | |
%183 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.norm2.weight : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_192 = torch.constant.int 0 | |
%184 = torch.aten.unsqueeze %183, %int0_192 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_193 = torch.constant.int 2 | |
%185 = torch.aten.unsqueeze %184, %int2_193 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_194 = torch.constant.int 3 | |
%186 = torch.aten.unsqueeze %185, %int3_194 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%187 = torch.aten.mul.Tensor %178, %186 : !torch.vtensor<[2,320,128,128],f32>, !torch.vtensor<[1,320,1,1],f16> -> !torch.vtensor<[2,320,128,128],f32> | |
%int1_195 = torch.constant.int 1 | |
%188 = torch.aten.add.Tensor %187, %182, %int1_195 : !torch.vtensor<[2,320,128,128],f32>, !torch.vtensor<[1,320,1,1],f16>, !torch.int -> !torch.vtensor<[2,320,128,128],f32> | |
%int5_196 = torch.constant.int 5 | |
%189 = torch.prims.convert_element_type %188, %int5_196 : !torch.vtensor<[2,320,128,128],f32>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%int5_197 = torch.constant.int 5 | |
%190 = torch.prims.convert_element_type %result1_181, %int5_197 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_198 = torch.constant.int 5 | |
%191 = torch.prims.convert_element_type %174, %int5_198 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_199 = torch.constant.int 3 | |
%192 = torch.prim.ListConstruct %int3_199 : (!torch.int) -> !torch.list<int> | |
%193 = torch.prims.squeeze %190, %192 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_200 = torch.constant.int 2 | |
%194 = torch.prim.ListConstruct %int2_200 : (!torch.int) -> !torch.list<int> | |
%195 = torch.prims.squeeze %193, %194 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_201 = torch.constant.int 3 | |
%196 = torch.prim.ListConstruct %int3_201 : (!torch.int) -> !torch.list<int> | |
%197 = torch.prims.squeeze %191, %196 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_202 = torch.constant.int 2 | |
%198 = torch.prim.ListConstruct %int2_202 : (!torch.int) -> !torch.list<int> | |
%199 = torch.prims.squeeze %197, %198 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%200 = torch.aten.detach %195 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%201 = torch.aten.detach %199 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%202 = torch.aten.silu %189 : !torch.vtensor<[2,320,128,128],f16> -> !torch.vtensor<[2,320,128,128],f16> | |
%none_203 = torch.constant.none | |
%203 = torch.aten.clone %202, %none_203 : !torch.vtensor<[2,320,128,128],f16>, !torch.none -> !torch.vtensor<[2,320,128,128],f16> | |
%_params.unet.down_blocks.0.resnets.0.conv2.weight = util.global.load @_params.unet.down_blocks.0.resnets.0.conv2.weight : tensor<320x320x3x3xf16> | |
%204 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.conv2.weight : tensor<320x320x3x3xf16> -> !torch.vtensor<[320,320,3,3],f16> | |
%_params.unet.down_blocks.0.resnets.0.conv2.bias = util.global.load @_params.unet.down_blocks.0.resnets.0.conv2.bias : tensor<320xf16> | |
%205 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.0.conv2.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int1_204 = torch.constant.int 1 | |
%int1_205 = torch.constant.int 1 | |
%206 = torch.prim.ListConstruct %int1_204, %int1_205 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_206 = torch.constant.int 1 | |
%int1_207 = torch.constant.int 1 | |
%207 = torch.prim.ListConstruct %int1_206, %int1_207 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_208 = torch.constant.int 1 | |
%int1_209 = torch.constant.int 1 | |
%208 = torch.prim.ListConstruct %int1_208, %int1_209 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_210 = torch.constant.bool false | |
%int0_211 = torch.constant.int 0 | |
%int0_212 = torch.constant.int 0 | |
%209 = torch.prim.ListConstruct %int0_211, %int0_212 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_213 = torch.constant.int 1 | |
%210 = torch.aten.convolution %203, %204, %205, %206, %207, %208, %false_210, %209, %int1_213 : !torch.vtensor<[2,320,128,128],f16>, !torch.vtensor<[320,320,3,3],f16>, !torch.vtensor<[320],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%int1_214 = torch.constant.int 1 | |
%211 = torch.aten.add.Tensor %110, %210, %int1_214 : !torch.vtensor<[2,320,128,128],f16>, !torch.vtensor<[2,320,128,128],f16>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%float1.000000e00 = torch.constant.float 1.000000e+00 | |
%212 = torch.aten.div.Scalar %211, %float1.000000e00 : !torch.vtensor<[2,320,128,128],f16>, !torch.float -> !torch.vtensor<[2,320,128,128],f16> | |
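// The div.Scalar by 1.0 above looks like the resnet's output scale factor (a no-op at 1.0).
// down_blocks.0.resnets.1 below repeats the same norm1 / conv1 / time_emb_proj / norm2 / conv2 /
// residual structure on %212.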
%int2_215 = torch.constant.int 2 | |
%int32_216 = torch.constant.int 32 | |
%int10_217 = torch.constant.int 10 | |
%int16384_218 = torch.constant.int 16384 | |
%213 = torch.prim.ListConstruct %int2_215, %int32_216, %int10_217, %int16384_218 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%214 = torch.aten.view %212, %213 : !torch.vtensor<[2,320,128,128],f16>, !torch.list<int> -> !torch.vtensor<[2,32,10,16384],f16> | |
%int6_219 = torch.constant.int 6 | |
%215 = torch.prims.convert_element_type %214, %int6_219 : !torch.vtensor<[2,32,10,16384],f16>, !torch.int -> !torch.vtensor<[2,32,10,16384],f32> | |
%int2_220 = torch.constant.int 2 | |
%int3_221 = torch.constant.int 3 | |
%216 = torch.prim.ListConstruct %int2_220, %int3_221 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_222 = torch.constant.int 0 | |
%true_223 = torch.constant.bool true | |
%result0_224, %result1_225 = torch.aten.var_mean.correction %215, %216, %int0_222, %true_223 : !torch.vtensor<[2,32,10,16384],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float1.000000e-05_226 = torch.constant.float 1.000000e-05 | |
%int1_227 = torch.constant.int 1 | |
%217 = torch.aten.add.Scalar %result0_224, %float1.000000e-05_226, %int1_227 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%218 = torch.aten.rsqrt %217 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_228 = torch.constant.int 1 | |
%219 = torch.aten.sub.Tensor %214, %result1_225, %int1_228 : !torch.vtensor<[2,32,10,16384],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,10,16384],f32> | |
%220 = torch.aten.mul.Tensor %219, %218 : !torch.vtensor<[2,32,10,16384],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,10,16384],f32> | |
%int2_229 = torch.constant.int 2 | |
%int320_230 = torch.constant.int 320 | |
%int128_231 = torch.constant.int 128 | |
%int128_232 = torch.constant.int 128 | |
%221 = torch.prim.ListConstruct %int2_229, %int320_230, %int128_231, %int128_232 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%222 = torch.aten.view %220, %221 : !torch.vtensor<[2,32,10,16384],f32>, !torch.list<int> -> !torch.vtensor<[2,320,128,128],f32> | |
%_params.unet.down_blocks.0.resnets.1.norm1.bias = util.global.load @_params.unet.down_blocks.0.resnets.1.norm1.bias : tensor<320xf16> | |
%223 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.norm1.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_233 = torch.constant.int 0 | |
%224 = torch.aten.unsqueeze %223, %int0_233 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_234 = torch.constant.int 2 | |
%225 = torch.aten.unsqueeze %224, %int2_234 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_235 = torch.constant.int 3 | |
%226 = torch.aten.unsqueeze %225, %int3_235 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%_params.unet.down_blocks.0.resnets.1.norm1.weight = util.global.load @_params.unet.down_blocks.0.resnets.1.norm1.weight : tensor<320xf16> | |
%227 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.norm1.weight : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_236 = torch.constant.int 0 | |
%228 = torch.aten.unsqueeze %227, %int0_236 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_237 = torch.constant.int 2 | |
%229 = torch.aten.unsqueeze %228, %int2_237 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_238 = torch.constant.int 3 | |
%230 = torch.aten.unsqueeze %229, %int3_238 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%231 = torch.aten.mul.Tensor %222, %230 : !torch.vtensor<[2,320,128,128],f32>, !torch.vtensor<[1,320,1,1],f16> -> !torch.vtensor<[2,320,128,128],f32> | |
%int1_239 = torch.constant.int 1 | |
%232 = torch.aten.add.Tensor %231, %226, %int1_239 : !torch.vtensor<[2,320,128,128],f32>, !torch.vtensor<[1,320,1,1],f16>, !torch.int -> !torch.vtensor<[2,320,128,128],f32> | |
%int5_240 = torch.constant.int 5 | |
%233 = torch.prims.convert_element_type %232, %int5_240 : !torch.vtensor<[2,320,128,128],f32>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%int5_241 = torch.constant.int 5 | |
%234 = torch.prims.convert_element_type %result1_225, %int5_241 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_242 = torch.constant.int 5 | |
%235 = torch.prims.convert_element_type %218, %int5_242 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_243 = torch.constant.int 3 | |
%236 = torch.prim.ListConstruct %int3_243 : (!torch.int) -> !torch.list<int> | |
%237 = torch.prims.squeeze %234, %236 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_244 = torch.constant.int 2 | |
%238 = torch.prim.ListConstruct %int2_244 : (!torch.int) -> !torch.list<int> | |
%239 = torch.prims.squeeze %237, %238 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_245 = torch.constant.int 3 | |
%240 = torch.prim.ListConstruct %int3_245 : (!torch.int) -> !torch.list<int> | |
%241 = torch.prims.squeeze %235, %240 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_246 = torch.constant.int 2 | |
%242 = torch.prim.ListConstruct %int2_246 : (!torch.int) -> !torch.list<int> | |
%243 = torch.prims.squeeze %241, %242 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%244 = torch.aten.detach %239 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%245 = torch.aten.detach %243 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%246 = torch.aten.silu %233 : !torch.vtensor<[2,320,128,128],f16> -> !torch.vtensor<[2,320,128,128],f16> | |
%_params.unet.down_blocks.0.resnets.1.conv1.weight = util.global.load @_params.unet.down_blocks.0.resnets.1.conv1.weight : tensor<320x320x3x3xf16> | |
%247 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.conv1.weight : tensor<320x320x3x3xf16> -> !torch.vtensor<[320,320,3,3],f16> | |
%_params.unet.down_blocks.0.resnets.1.conv1.bias = util.global.load @_params.unet.down_blocks.0.resnets.1.conv1.bias : tensor<320xf16> | |
%248 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.conv1.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int1_247 = torch.constant.int 1 | |
%int1_248 = torch.constant.int 1 | |
%249 = torch.prim.ListConstruct %int1_247, %int1_248 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_249 = torch.constant.int 1 | |
%int1_250 = torch.constant.int 1 | |
%250 = torch.prim.ListConstruct %int1_249, %int1_250 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_251 = torch.constant.int 1 | |
%int1_252 = torch.constant.int 1 | |
%251 = torch.prim.ListConstruct %int1_251, %int1_252 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_253 = torch.constant.bool false | |
%int0_254 = torch.constant.int 0 | |
%int0_255 = torch.constant.int 0 | |
%252 = torch.prim.ListConstruct %int0_254, %int0_255 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_256 = torch.constant.int 1 | |
%253 = torch.aten.convolution %246, %247, %248, %249, %250, %251, %false_253, %252, %int1_256 : !torch.vtensor<[2,320,128,128],f16>, !torch.vtensor<[320,320,3,3],f16>, !torch.vtensor<[320],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%254 = torch.aten.silu %103 : !torch.vtensor<[2,1280],f16> -> !torch.vtensor<[2,1280],f16> | |
%_params.unet.down_blocks.0.resnets.1.time_emb_proj.weight = util.global.load @_params.unet.down_blocks.0.resnets.1.time_emb_proj.weight : tensor<320x1280xf16> | |
%255 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.time_emb_proj.weight : tensor<320x1280xf16> -> !torch.vtensor<[320,1280],f16> | |
%int0_257 = torch.constant.int 0 | |
%int1_258 = torch.constant.int 1 | |
%256 = torch.aten.transpose.int %255, %int0_257, %int1_258 : !torch.vtensor<[320,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,320],f16> | |
%_params.unet.down_blocks.0.resnets.1.time_emb_proj.bias = util.global.load @_params.unet.down_blocks.0.resnets.1.time_emb_proj.bias : tensor<320xf16> | |
%257 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.time_emb_proj.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int6_259 = torch.constant.int 6 | |
%258 = torch.prims.convert_element_type %257, %int6_259 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[320],f32> | |
%int6_260 = torch.constant.int 6 | |
%259 = torch.prims.convert_element_type %254, %int6_260 : !torch.vtensor<[2,1280],f16>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int6_261 = torch.constant.int 6 | |
%260 = torch.prims.convert_element_type %256, %int6_261 : !torch.vtensor<[1280,320],f16>, !torch.int -> !torch.vtensor<[1280,320],f32> | |
%261 = torch.aten.mm %259, %260 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280,320],f32> -> !torch.vtensor<[2,320],f32> | |
%int1_262 = torch.constant.int 1 | |
%262 = torch.aten.mul.Scalar %261, %int1_262 : !torch.vtensor<[2,320],f32>, !torch.int -> !torch.vtensor<[2,320],f32> | |
%int1_263 = torch.constant.int 1 | |
%263 = torch.aten.mul.Scalar %258, %int1_263 : !torch.vtensor<[320],f32>, !torch.int -> !torch.vtensor<[320],f32> | |
%int1_264 = torch.constant.int 1 | |
%264 = torch.aten.add.Tensor %262, %263, %int1_264 : !torch.vtensor<[2,320],f32>, !torch.vtensor<[320],f32>, !torch.int -> !torch.vtensor<[2,320],f32> | |
%int5_265 = torch.constant.int 5 | |
%265 = torch.prims.convert_element_type %264, %int5_265 : !torch.vtensor<[2,320],f32>, !torch.int -> !torch.vtensor<[2,320],f16> | |
%int0_266 = torch.constant.int 0 | |
%int0_267 = torch.constant.int 0 | |
%int9223372036854775807_268 = torch.constant.int 9223372036854775807 | |
%int1_269 = torch.constant.int 1 | |
%266 = torch.aten.slice.Tensor %265, %int0_266, %int0_267, %int9223372036854775807_268, %int1_269 : !torch.vtensor<[2,320],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,320],f16> | |
%int1_270 = torch.constant.int 1 | |
%int0_271 = torch.constant.int 0 | |
%int9223372036854775807_272 = torch.constant.int 9223372036854775807 | |
%int1_273 = torch.constant.int 1 | |
%267 = torch.aten.slice.Tensor %266, %int1_270, %int0_271, %int9223372036854775807_272, %int1_273 : !torch.vtensor<[2,320],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,320],f16> | |
%int2_274 = torch.constant.int 2 | |
%268 = torch.aten.unsqueeze %267, %int2_274 : !torch.vtensor<[2,320],f16>, !torch.int -> !torch.vtensor<[2,320,1],f16> | |
%int3_275 = torch.constant.int 3 | |
%269 = torch.aten.unsqueeze %268, %int3_275 : !torch.vtensor<[2,320,1],f16>, !torch.int -> !torch.vtensor<[2,320,1,1],f16> | |
%int1_276 = torch.constant.int 1 | |
%270 = torch.aten.add.Tensor %253, %269, %int1_276 : !torch.vtensor<[2,320,128,128],f16>, !torch.vtensor<[2,320,1,1],f16>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%int2_277 = torch.constant.int 2 | |
%int32_278 = torch.constant.int 32 | |
%int10_279 = torch.constant.int 10 | |
%int16384_280 = torch.constant.int 16384 | |
%271 = torch.prim.ListConstruct %int2_277, %int32_278, %int10_279, %int16384_280 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%272 = torch.aten.view %270, %271 : !torch.vtensor<[2,320,128,128],f16>, !torch.list<int> -> !torch.vtensor<[2,32,10,16384],f16> | |
%int6_281 = torch.constant.int 6 | |
%273 = torch.prims.convert_element_type %272, %int6_281 : !torch.vtensor<[2,32,10,16384],f16>, !torch.int -> !torch.vtensor<[2,32,10,16384],f32> | |
%int2_282 = torch.constant.int 2 | |
%int3_283 = torch.constant.int 3 | |
%274 = torch.prim.ListConstruct %int2_282, %int3_283 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_284 = torch.constant.int 0 | |
%true_285 = torch.constant.bool true | |
%result0_286, %result1_287 = torch.aten.var_mean.correction %273, %274, %int0_284, %true_285 : !torch.vtensor<[2,32,10,16384],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float1.000000e-05_288 = torch.constant.float 1.000000e-05 | |
%int1_289 = torch.constant.int 1 | |
%275 = torch.aten.add.Scalar %result0_286, %float1.000000e-05_288, %int1_289 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%276 = torch.aten.rsqrt %275 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_290 = torch.constant.int 1 | |
%277 = torch.aten.sub.Tensor %272, %result1_287, %int1_290 : !torch.vtensor<[2,32,10,16384],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,10,16384],f32> | |
%278 = torch.aten.mul.Tensor %277, %276 : !torch.vtensor<[2,32,10,16384],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,10,16384],f32> | |
%int2_291 = torch.constant.int 2 | |
%int320_292 = torch.constant.int 320 | |
%int128_293 = torch.constant.int 128 | |
%int128_294 = torch.constant.int 128 | |
%279 = torch.prim.ListConstruct %int2_291, %int320_292, %int128_293, %int128_294 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%280 = torch.aten.view %278, %279 : !torch.vtensor<[2,32,10,16384],f32>, !torch.list<int> -> !torch.vtensor<[2,320,128,128],f32> | |
%_params.unet.down_blocks.0.resnets.1.norm2.bias = util.global.load @_params.unet.down_blocks.0.resnets.1.norm2.bias : tensor<320xf16> | |
%281 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.norm2.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_295 = torch.constant.int 0 | |
%282 = torch.aten.unsqueeze %281, %int0_295 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_296 = torch.constant.int 2 | |
%283 = torch.aten.unsqueeze %282, %int2_296 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_297 = torch.constant.int 3 | |
%284 = torch.aten.unsqueeze %283, %int3_297 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%_params.unet.down_blocks.0.resnets.1.norm2.weight = util.global.load @_params.unet.down_blocks.0.resnets.1.norm2.weight : tensor<320xf16> | |
%285 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.norm2.weight : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_298 = torch.constant.int 0 | |
%286 = torch.aten.unsqueeze %285, %int0_298 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_299 = torch.constant.int 2 | |
%287 = torch.aten.unsqueeze %286, %int2_299 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_300 = torch.constant.int 3 | |
%288 = torch.aten.unsqueeze %287, %int3_300 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%289 = torch.aten.mul.Tensor %280, %288 : !torch.vtensor<[2,320,128,128],f32>, !torch.vtensor<[1,320,1,1],f16> -> !torch.vtensor<[2,320,128,128],f32> | |
%int1_301 = torch.constant.int 1 | |
%290 = torch.aten.add.Tensor %289, %284, %int1_301 : !torch.vtensor<[2,320,128,128],f32>, !torch.vtensor<[1,320,1,1],f16>, !torch.int -> !torch.vtensor<[2,320,128,128],f32> | |
%int5_302 = torch.constant.int 5 | |
%291 = torch.prims.convert_element_type %290, %int5_302 : !torch.vtensor<[2,320,128,128],f32>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%int5_303 = torch.constant.int 5 | |
%292 = torch.prims.convert_element_type %result1_287, %int5_303 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_304 = torch.constant.int 5 | |
%293 = torch.prims.convert_element_type %276, %int5_304 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_305 = torch.constant.int 3 | |
%294 = torch.prim.ListConstruct %int3_305 : (!torch.int) -> !torch.list<int> | |
%295 = torch.prims.squeeze %292, %294 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_306 = torch.constant.int 2 | |
%296 = torch.prim.ListConstruct %int2_306 : (!torch.int) -> !torch.list<int> | |
%297 = torch.prims.squeeze %295, %296 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_307 = torch.constant.int 3 | |
%298 = torch.prim.ListConstruct %int3_307 : (!torch.int) -> !torch.list<int> | |
%299 = torch.prims.squeeze %293, %298 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_308 = torch.constant.int 2 | |
%300 = torch.prim.ListConstruct %int2_308 : (!torch.int) -> !torch.list<int> | |
%301 = torch.prims.squeeze %299, %300 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%302 = torch.aten.detach %297 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%303 = torch.aten.detach %301 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%304 = torch.aten.silu %291 : !torch.vtensor<[2,320,128,128],f16> -> !torch.vtensor<[2,320,128,128],f16> | |
%none_309 = torch.constant.none | |
%305 = torch.aten.clone %304, %none_309 : !torch.vtensor<[2,320,128,128],f16>, !torch.none -> !torch.vtensor<[2,320,128,128],f16> | |
%_params.unet.down_blocks.0.resnets.1.conv2.weight = util.global.load @_params.unet.down_blocks.0.resnets.1.conv2.weight : tensor<320x320x3x3xf16> | |
%306 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.conv2.weight : tensor<320x320x3x3xf16> -> !torch.vtensor<[320,320,3,3],f16> | |
%_params.unet.down_blocks.0.resnets.1.conv2.bias = util.global.load @_params.unet.down_blocks.0.resnets.1.conv2.bias : tensor<320xf16> | |
%307 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.resnets.1.conv2.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int1_310 = torch.constant.int 1 | |
%int1_311 = torch.constant.int 1 | |
%308 = torch.prim.ListConstruct %int1_310, %int1_311 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_312 = torch.constant.int 1 | |
%int1_313 = torch.constant.int 1 | |
%309 = torch.prim.ListConstruct %int1_312, %int1_313 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_314 = torch.constant.int 1 | |
%int1_315 = torch.constant.int 1 | |
%310 = torch.prim.ListConstruct %int1_314, %int1_315 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_316 = torch.constant.bool false | |
%int0_317 = torch.constant.int 0 | |
%int0_318 = torch.constant.int 0 | |
%311 = torch.prim.ListConstruct %int0_317, %int0_318 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_319 = torch.constant.int 1 | |
%312 = torch.aten.convolution %305, %306, %307, %308, %309, %310, %false_316, %311, %int1_319 : !torch.vtensor<[2,320,128,128],f16>, !torch.vtensor<[320,320,3,3],f16>, !torch.vtensor<[320],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%int1_320 = torch.constant.int 1 | |
%313 = torch.aten.add.Tensor %212, %312, %int1_320 : !torch.vtensor<[2,320,128,128],f16>, !torch.vtensor<[2,320,128,128],f16>, !torch.int -> !torch.vtensor<[2,320,128,128],f16> | |
%float1.000000e00_321 = torch.constant.float 1.000000e+00 | |
%314 = torch.aten.div.Scalar %313, %float1.000000e00_321 : !torch.vtensor<[2,320,128,128],f16>, !torch.float -> !torch.vtensor<[2,320,128,128],f16> | |
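// down_blocks.0.downsamplers.0: 3x3 convolution with stride 2, padding 1, halving the spatial
// resolution from 128x128 to 64x64.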
%_params.unet.down_blocks.0.downsamplers.0.conv.weight = util.global.load @_params.unet.down_blocks.0.downsamplers.0.conv.weight : tensor<320x320x3x3xf16> | |
%315 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.downsamplers.0.conv.weight : tensor<320x320x3x3xf16> -> !torch.vtensor<[320,320,3,3],f16> | |
%_params.unet.down_blocks.0.downsamplers.0.conv.bias = util.global.load @_params.unet.down_blocks.0.downsamplers.0.conv.bias : tensor<320xf16> | |
%316 = torch_c.from_builtin_tensor %_params.unet.down_blocks.0.downsamplers.0.conv.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int2_322 = torch.constant.int 2 | |
%int2_323 = torch.constant.int 2 | |
%317 = torch.prim.ListConstruct %int2_322, %int2_323 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_324 = torch.constant.int 1 | |
%int1_325 = torch.constant.int 1 | |
%318 = torch.prim.ListConstruct %int1_324, %int1_325 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_326 = torch.constant.int 1 | |
%int1_327 = torch.constant.int 1 | |
%319 = torch.prim.ListConstruct %int1_326, %int1_327 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_328 = torch.constant.bool false | |
%int0_329 = torch.constant.int 0 | |
%int0_330 = torch.constant.int 0 | |
%320 = torch.prim.ListConstruct %int0_329, %int0_330 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_331 = torch.constant.int 1 | |
%321 = torch.aten.convolution %314, %315, %316, %317, %318, %319, %false_328, %320, %int1_331 : !torch.vtensor<[2,320,128,128],f16>, !torch.vtensor<[320,320,3,3],f16>, !torch.vtensor<[320],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,320,64,64],f16> | |
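// down_blocks.1.resnets.0 begins: the same GroupNorm pattern at 64x64 resolution, so each of the
// 32 groups now normalizes over 10 x 4096 elements.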
%int2_332 = torch.constant.int 2 | |
%int32_333 = torch.constant.int 32 | |
%int10_334 = torch.constant.int 10 | |
%int4096 = torch.constant.int 4096 | |
%322 = torch.prim.ListConstruct %int2_332, %int32_333, %int10_334, %int4096 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%323 = torch.aten.view %321, %322 : !torch.vtensor<[2,320,64,64],f16>, !torch.list<int> -> !torch.vtensor<[2,32,10,4096],f16> | |
%int6_335 = torch.constant.int 6 | |
%324 = torch.prims.convert_element_type %323, %int6_335 : !torch.vtensor<[2,32,10,4096],f16>, !torch.int -> !torch.vtensor<[2,32,10,4096],f32> | |
%int2_336 = torch.constant.int 2 | |
%int3_337 = torch.constant.int 3 | |
%325 = torch.prim.ListConstruct %int2_336, %int3_337 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_338 = torch.constant.int 0 | |
%true_339 = torch.constant.bool true | |
%result0_340, %result1_341 = torch.aten.var_mean.correction %324, %325, %int0_338, %true_339 : !torch.vtensor<[2,32,10,4096],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float1.000000e-05_342 = torch.constant.float 1.000000e-05 | |
%int1_343 = torch.constant.int 1 | |
%326 = torch.aten.add.Scalar %result0_340, %float1.000000e-05_342, %int1_343 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%327 = torch.aten.rsqrt %326 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_344 = torch.constant.int 1 | |
%328 = torch.aten.sub.Tensor %323, %result1_341, %int1_344 : !torch.vtensor<[2,32,10,4096],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,10,4096],f32> | |
%329 = torch.aten.mul.Tensor %328, %327 : !torch.vtensor<[2,32,10,4096],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,10,4096],f32> | |
%int2_345 = torch.constant.int 2 | |
%int320_346 = torch.constant.int 320 | |
%int64 = torch.constant.int 64 | |
%int64_347 = torch.constant.int 64 | |
%330 = torch.prim.ListConstruct %int2_345, %int320_346, %int64, %int64_347 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%331 = torch.aten.view %329, %330 : !torch.vtensor<[2,32,10,4096],f32>, !torch.list<int> -> !torch.vtensor<[2,320,64,64],f32> | |
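// Per-channel affine of the group norm: norm1 weight/bias are unsqueezed from
// [320] to [1,320,1,1] so they broadcast over the batch and spatial dims.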
%_params.unet.down_blocks.1.resnets.0.norm1.bias = util.global.load @_params.unet.down_blocks.1.resnets.0.norm1.bias : tensor<320xf16> | |
%332 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.norm1.bias : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_348 = torch.constant.int 0 | |
%333 = torch.aten.unsqueeze %332, %int0_348 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_349 = torch.constant.int 2 | |
%334 = torch.aten.unsqueeze %333, %int2_349 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_350 = torch.constant.int 3 | |
%335 = torch.aten.unsqueeze %334, %int3_350 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%_params.unet.down_blocks.1.resnets.0.norm1.weight = util.global.load @_params.unet.down_blocks.1.resnets.0.norm1.weight : tensor<320xf16> | |
%336 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.norm1.weight : tensor<320xf16> -> !torch.vtensor<[320],f16> | |
%int0_351 = torch.constant.int 0 | |
%337 = torch.aten.unsqueeze %336, %int0_351 : !torch.vtensor<[320],f16>, !torch.int -> !torch.vtensor<[1,320],f16> | |
%int2_352 = torch.constant.int 2 | |
%338 = torch.aten.unsqueeze %337, %int2_352 : !torch.vtensor<[1,320],f16>, !torch.int -> !torch.vtensor<[1,320,1],f16> | |
%int3_353 = torch.constant.int 3 | |
%339 = torch.aten.unsqueeze %338, %int3_353 : !torch.vtensor<[1,320,1],f16>, !torch.int -> !torch.vtensor<[1,320,1,1],f16> | |
%340 = torch.aten.mul.Tensor %331, %339 : !torch.vtensor<[2,320,64,64],f32>, !torch.vtensor<[1,320,1,1],f16> -> !torch.vtensor<[2,320,64,64],f32> | |
%int1_354 = torch.constant.int 1 | |
%341 = torch.aten.add.Tensor %340, %335, %int1_354 : !torch.vtensor<[2,320,64,64],f32>, !torch.vtensor<[1,320,1,1],f16>, !torch.int -> !torch.vtensor<[2,320,64,64],f32> | |
%int5_355 = torch.constant.int 5 | |
%342 = torch.prims.convert_element_type %341, %int5_355 : !torch.vtensor<[2,320,64,64],f32>, !torch.int -> !torch.vtensor<[2,320,64,64],f16> | |
%int5_356 = torch.constant.int 5 | |
%343 = torch.prims.convert_element_type %result1_341, %int5_356 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_357 = torch.constant.int 5 | |
%344 = torch.prims.convert_element_type %327, %int5_357 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_358 = torch.constant.int 3 | |
%345 = torch.prim.ListConstruct %int3_358 : (!torch.int) -> !torch.list<int> | |
%346 = torch.prims.squeeze %343, %345 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_359 = torch.constant.int 2 | |
%347 = torch.prim.ListConstruct %int2_359 : (!torch.int) -> !torch.list<int> | |
%348 = torch.prims.squeeze %346, %347 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_360 = torch.constant.int 3 | |
%349 = torch.prim.ListConstruct %int3_360 : (!torch.int) -> !torch.list<int> | |
%350 = torch.prims.squeeze %344, %349 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_361 = torch.constant.int 2 | |
%351 = torch.prim.ListConstruct %int2_361 : (!torch.int) -> !torch.list<int> | |
%352 = torch.prims.squeeze %350, %351 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%353 = torch.aten.detach %348 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%354 = torch.aten.detach %352 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
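// %353/%354 are the detached per-group mean and rstd from the group-norm
// decomposition; they do not appear to be consumed downstream.
// Next: SiLU, then conv1 (3x3, stride 1, padding 1, 320 -> 640 channels).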
%355 = torch.aten.silu %342 : !torch.vtensor<[2,320,64,64],f16> -> !torch.vtensor<[2,320,64,64],f16> | |
%_params.unet.down_blocks.1.resnets.0.conv1.weight = util.global.load @_params.unet.down_blocks.1.resnets.0.conv1.weight : tensor<640x320x3x3xf16> | |
%356 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.conv1.weight : tensor<640x320x3x3xf16> -> !torch.vtensor<[640,320,3,3],f16> | |
%_params.unet.down_blocks.1.resnets.0.conv1.bias = util.global.load @_params.unet.down_blocks.1.resnets.0.conv1.bias : tensor<640xf16> | |
%357 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.conv1.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_362 = torch.constant.int 1 | |
%int1_363 = torch.constant.int 1 | |
%358 = torch.prim.ListConstruct %int1_362, %int1_363 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_364 = torch.constant.int 1 | |
%int1_365 = torch.constant.int 1 | |
%359 = torch.prim.ListConstruct %int1_364, %int1_365 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_366 = torch.constant.int 1 | |
%int1_367 = torch.constant.int 1 | |
%360 = torch.prim.ListConstruct %int1_366, %int1_367 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_368 = torch.constant.bool false | |
%int0_369 = torch.constant.int 0 | |
%int0_370 = torch.constant.int 0 | |
%361 = torch.prim.ListConstruct %int0_369, %int0_370 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_371 = torch.constant.int 1 | |
%362 = torch.aten.convolution %355, %356, %357, %358, %359, %360, %false_368, %361, %int1_371 : !torch.vtensor<[2,320,64,64],f16>, !torch.vtensor<[640,320,3,3],f16>, !torch.vtensor<[640],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
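// Time-embedding injection: SiLU on the shared [2,1280] embedding %103,
// time_emb_proj linear 1280 -> 640 (mm and bias add done in f32, cast back to
// f16), then full-range slices and two unsqueezes shape it to [2,640,1,1] for
// a broadcast add onto the conv1 output.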
%363 = torch.aten.silu %103 : !torch.vtensor<[2,1280],f16> -> !torch.vtensor<[2,1280],f16> | |
%_params.unet.down_blocks.1.resnets.0.time_emb_proj.weight = util.global.load @_params.unet.down_blocks.1.resnets.0.time_emb_proj.weight : tensor<640x1280xf16> | |
%364 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.time_emb_proj.weight : tensor<640x1280xf16> -> !torch.vtensor<[640,1280],f16> | |
%int0_372 = torch.constant.int 0 | |
%int1_373 = torch.constant.int 1 | |
%365 = torch.aten.transpose.int %364, %int0_372, %int1_373 : !torch.vtensor<[640,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,640],f16> | |
%_params.unet.down_blocks.1.resnets.0.time_emb_proj.bias = util.global.load @_params.unet.down_blocks.1.resnets.0.time_emb_proj.bias : tensor<640xf16> | |
%366 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.time_emb_proj.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_374 = torch.constant.int 6 | |
%367 = torch.prims.convert_element_type %366, %int6_374 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_375 = torch.constant.int 6 | |
%368 = torch.prims.convert_element_type %363, %int6_375 : !torch.vtensor<[2,1280],f16>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int6_376 = torch.constant.int 6 | |
%369 = torch.prims.convert_element_type %365, %int6_376 : !torch.vtensor<[1280,640],f16>, !torch.int -> !torch.vtensor<[1280,640],f32> | |
%370 = torch.aten.mm %368, %369 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280,640],f32> -> !torch.vtensor<[2,640],f32> | |
%int1_377 = torch.constant.int 1 | |
%371 = torch.aten.mul.Scalar %370, %int1_377 : !torch.vtensor<[2,640],f32>, !torch.int -> !torch.vtensor<[2,640],f32> | |
%int1_378 = torch.constant.int 1 | |
%372 = torch.aten.mul.Scalar %367, %int1_378 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_379 = torch.constant.int 1 | |
%373 = torch.aten.add.Tensor %371, %372, %int1_379 : !torch.vtensor<[2,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[2,640],f32> | |
%int5_380 = torch.constant.int 5 | |
%374 = torch.prims.convert_element_type %373, %int5_380 : !torch.vtensor<[2,640],f32>, !torch.int -> !torch.vtensor<[2,640],f16> | |
%int0_381 = torch.constant.int 0 | |
%int0_382 = torch.constant.int 0 | |
%int9223372036854775807_383 = torch.constant.int 9223372036854775807 | |
%int1_384 = torch.constant.int 1 | |
%375 = torch.aten.slice.Tensor %374, %int0_381, %int0_382, %int9223372036854775807_383, %int1_384 : !torch.vtensor<[2,640],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,640],f16> | |
%int1_385 = torch.constant.int 1 | |
%int0_386 = torch.constant.int 0 | |
%int9223372036854775807_387 = torch.constant.int 9223372036854775807 | |
%int1_388 = torch.constant.int 1 | |
%376 = torch.aten.slice.Tensor %375, %int1_385, %int0_386, %int9223372036854775807_387, %int1_388 : !torch.vtensor<[2,640],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,640],f16> | |
%int2_389 = torch.constant.int 2 | |
%377 = torch.aten.unsqueeze %376, %int2_389 : !torch.vtensor<[2,640],f16>, !torch.int -> !torch.vtensor<[2,640,1],f16> | |
%int3_390 = torch.constant.int 3 | |
%378 = torch.aten.unsqueeze %377, %int3_390 : !torch.vtensor<[2,640,1],f16>, !torch.int -> !torch.vtensor<[2,640,1,1],f16> | |
%int1_391 = torch.constant.int 1 | |
%379 = torch.aten.add.Tensor %362, %378, %int1_391 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[2,640,1,1],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
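// norm2: second GroupNorm of the resnet, now 32 groups x 20 channels over 640
// channels, using the same reshape-to-[2,32,20,4096] pattern as norm1.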
%int2_392 = torch.constant.int 2 | |
%int32_393 = torch.constant.int 32 | |
%int20 = torch.constant.int 20 | |
%int4096_394 = torch.constant.int 4096 | |
%380 = torch.prim.ListConstruct %int2_392, %int32_393, %int20, %int4096_394 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%381 = torch.aten.view %379, %380 : !torch.vtensor<[2,640,64,64],f16>, !torch.list<int> -> !torch.vtensor<[2,32,20,4096],f16> | |
%int6_395 = torch.constant.int 6 | |
%382 = torch.prims.convert_element_type %381, %int6_395 : !torch.vtensor<[2,32,20,4096],f16>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32> | |
%int2_396 = torch.constant.int 2 | |
%int3_397 = torch.constant.int 3 | |
%383 = torch.prim.ListConstruct %int2_396, %int3_397 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_398 = torch.constant.int 0 | |
%true_399 = torch.constant.bool true | |
%result0_400, %result1_401 = torch.aten.var_mean.correction %382, %383, %int0_398, %true_399 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float1.000000e-05_402 = torch.constant.float 1.000000e-05 | |
%int1_403 = torch.constant.int 1 | |
%384 = torch.aten.add.Scalar %result0_400, %float1.000000e-05_402, %int1_403 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%385 = torch.aten.rsqrt %384 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_404 = torch.constant.int 1 | |
%386 = torch.aten.sub.Tensor %381, %result1_401, %int1_404 : !torch.vtensor<[2,32,20,4096],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32> | |
%387 = torch.aten.mul.Tensor %386, %385 : !torch.vtensor<[2,32,20,4096],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,20,4096],f32> | |
%int2_405 = torch.constant.int 2 | |
%int640 = torch.constant.int 640 | |
%int64_406 = torch.constant.int 64 | |
%int64_407 = torch.constant.int 64 | |
%388 = torch.prim.ListConstruct %int2_405, %int640, %int64_406, %int64_407 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%389 = torch.aten.view %387, %388 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int> -> !torch.vtensor<[2,640,64,64],f32> | |
%_params.unet.down_blocks.1.resnets.0.norm2.bias = util.global.load @_params.unet.down_blocks.1.resnets.0.norm2.bias : tensor<640xf16> | |
%390 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.norm2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int0_408 = torch.constant.int 0 | |
%391 = torch.aten.unsqueeze %390, %int0_408 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16> | |
%int2_409 = torch.constant.int 2 | |
%392 = torch.aten.unsqueeze %391, %int2_409 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16> | |
%int3_410 = torch.constant.int 3 | |
%393 = torch.aten.unsqueeze %392, %int3_410 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16> | |
%_params.unet.down_blocks.1.resnets.0.norm2.weight = util.global.load @_params.unet.down_blocks.1.resnets.0.norm2.weight : tensor<640xf16> | |
%394 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.norm2.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int0_411 = torch.constant.int 0 | |
%395 = torch.aten.unsqueeze %394, %int0_411 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16> | |
%int2_412 = torch.constant.int 2 | |
%396 = torch.aten.unsqueeze %395, %int2_412 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16> | |
%int3_413 = torch.constant.int 3 | |
%397 = torch.aten.unsqueeze %396, %int3_413 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16> | |
%398 = torch.aten.mul.Tensor %389, %397 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16> -> !torch.vtensor<[2,640,64,64],f32> | |
%int1_414 = torch.constant.int 1 | |
%399 = torch.aten.add.Tensor %398, %393, %int1_414 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f32> | |
%int5_415 = torch.constant.int 5 | |
%400 = torch.prims.convert_element_type %399, %int5_415 : !torch.vtensor<[2,640,64,64],f32>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
%int5_416 = torch.constant.int 5 | |
%401 = torch.prims.convert_element_type %result1_401, %int5_416 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_417 = torch.constant.int 5 | |
%402 = torch.prims.convert_element_type %385, %int5_417 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_418 = torch.constant.int 3 | |
%403 = torch.prim.ListConstruct %int3_418 : (!torch.int) -> !torch.list<int> | |
%404 = torch.prims.squeeze %401, %403 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_419 = torch.constant.int 2 | |
%405 = torch.prim.ListConstruct %int2_419 : (!torch.int) -> !torch.list<int> | |
%406 = torch.prims.squeeze %404, %405 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_420 = torch.constant.int 3 | |
%407 = torch.prim.ListConstruct %int3_420 : (!torch.int) -> !torch.list<int> | |
%408 = torch.prims.squeeze %402, %407 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_421 = torch.constant.int 2 | |
%409 = torch.prim.ListConstruct %int2_421 : (!torch.int) -> !torch.list<int> | |
%410 = torch.prims.squeeze %408, %409 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%411 = torch.aten.detach %406 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%412 = torch.aten.detach %410 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
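// SiLU, then an aten.clone (likely the block's dropout with p = 0.0 lowered to
// a plain copy), then conv2 (3x3, stride 1, padding 1, 640 -> 640).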
%413 = torch.aten.silu %400 : !torch.vtensor<[2,640,64,64],f16> -> !torch.vtensor<[2,640,64,64],f16> | |
%none_422 = torch.constant.none | |
%414 = torch.aten.clone %413, %none_422 : !torch.vtensor<[2,640,64,64],f16>, !torch.none -> !torch.vtensor<[2,640,64,64],f16> | |
%_params.unet.down_blocks.1.resnets.0.conv2.weight = util.global.load @_params.unet.down_blocks.1.resnets.0.conv2.weight : tensor<640x640x3x3xf16> | |
%415 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.conv2.weight : tensor<640x640x3x3xf16> -> !torch.vtensor<[640,640,3,3],f16> | |
%_params.unet.down_blocks.1.resnets.0.conv2.bias = util.global.load @_params.unet.down_blocks.1.resnets.0.conv2.bias : tensor<640xf16> | |
%416 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.conv2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_423 = torch.constant.int 1 | |
%int1_424 = torch.constant.int 1 | |
%417 = torch.prim.ListConstruct %int1_423, %int1_424 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_425 = torch.constant.int 1 | |
%int1_426 = torch.constant.int 1 | |
%418 = torch.prim.ListConstruct %int1_425, %int1_426 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_427 = torch.constant.int 1 | |
%int1_428 = torch.constant.int 1 | |
%419 = torch.prim.ListConstruct %int1_427, %int1_428 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_429 = torch.constant.bool false | |
%int0_430 = torch.constant.int 0 | |
%int0_431 = torch.constant.int 0 | |
%420 = torch.prim.ListConstruct %int0_430, %int0_431 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_432 = torch.constant.int 1 | |
%421 = torch.aten.convolution %414, %415, %416, %417, %418, %419, %false_429, %420, %int1_432 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[640,640,3,3],f16>, !torch.vtensor<[640],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
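// conv_shortcut: 1x1 conv taking the block input %321 from 320 -> 640 channels
// so the residual add type-checks; the sum is again divided by 1.0 (matching a
// resnet output scale factor of 1.0, so numerically a no-op).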
%_params.unet.down_blocks.1.resnets.0.conv_shortcut.weight = util.global.load @_params.unet.down_blocks.1.resnets.0.conv_shortcut.weight : tensor<640x320x1x1xf16> | |
%422 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.conv_shortcut.weight : tensor<640x320x1x1xf16> -> !torch.vtensor<[640,320,1,1],f16> | |
%_params.unet.down_blocks.1.resnets.0.conv_shortcut.bias = util.global.load @_params.unet.down_blocks.1.resnets.0.conv_shortcut.bias : tensor<640xf16> | |
%423 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.0.conv_shortcut.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_433 = torch.constant.int 1 | |
%int1_434 = torch.constant.int 1 | |
%424 = torch.prim.ListConstruct %int1_433, %int1_434 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_435 = torch.constant.int 0 | |
%int0_436 = torch.constant.int 0 | |
%425 = torch.prim.ListConstruct %int0_435, %int0_436 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_437 = torch.constant.int 1 | |
%int1_438 = torch.constant.int 1 | |
%426 = torch.prim.ListConstruct %int1_437, %int1_438 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_439 = torch.constant.bool false | |
%int0_440 = torch.constant.int 0 | |
%int0_441 = torch.constant.int 0 | |
%427 = torch.prim.ListConstruct %int0_440, %int0_441 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_442 = torch.constant.int 1 | |
%428 = torch.aten.convolution %321, %422, %423, %424, %425, %426, %false_439, %427, %int1_442 : !torch.vtensor<[2,320,64,64],f16>, !torch.vtensor<[640,320,1,1],f16>, !torch.vtensor<[640],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
%int1_443 = torch.constant.int 1 | |
%429 = torch.aten.add.Tensor %428, %421, %int1_443 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[2,640,64,64],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
%float1.000000e00_444 = torch.constant.float 1.000000e+00 | |
%430 = torch.aten.div.Scalar %429, %float1.000000e00_444 : !torch.vtensor<[2,640,64,64],f16>, !torch.float -> !torch.vtensor<[2,640,64,64],f16> | |
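// down_blocks.1.attentions.0: the spatial transformer starts with its own
// GroupNorm (32 x 20). Note the eps constant below is ~1e-6, not the 1e-5
// used in the resnet norms.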
%int2_445 = torch.constant.int 2 | |
%int32_446 = torch.constant.int 32 | |
%int20_447 = torch.constant.int 20 | |
%int4096_448 = torch.constant.int 4096 | |
%431 = torch.prim.ListConstruct %int2_445, %int32_446, %int20_447, %int4096_448 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%432 = torch.aten.view %430, %431 : !torch.vtensor<[2,640,64,64],f16>, !torch.list<int> -> !torch.vtensor<[2,32,20,4096],f16> | |
%int6_449 = torch.constant.int 6 | |
%433 = torch.prims.convert_element_type %432, %int6_449 : !torch.vtensor<[2,32,20,4096],f16>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32> | |
%int2_450 = torch.constant.int 2 | |
%int3_451 = torch.constant.int 3 | |
%434 = torch.prim.ListConstruct %int2_450, %int3_451 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_452 = torch.constant.int 0 | |
%true_453 = torch.constant.bool true | |
%result0_454, %result1_455 = torch.aten.var_mean.correction %433, %434, %int0_452, %true_453 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float9.999990e-07 = torch.constant.float 9.9999999999999995E-7 | |
%int1_456 = torch.constant.int 1 | |
%435 = torch.aten.add.Scalar %result0_454, %float9.999990e-07, %int1_456 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%436 = torch.aten.rsqrt %435 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_457 = torch.constant.int 1 | |
%437 = torch.aten.sub.Tensor %432, %result1_455, %int1_457 : !torch.vtensor<[2,32,20,4096],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32> | |
%438 = torch.aten.mul.Tensor %437, %436 : !torch.vtensor<[2,32,20,4096],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,20,4096],f32> | |
%int2_458 = torch.constant.int 2 | |
%int640_459 = torch.constant.int 640 | |
%int64_460 = torch.constant.int 64 | |
%int64_461 = torch.constant.int 64 | |
%439 = torch.prim.ListConstruct %int2_458, %int640_459, %int64_460, %int64_461 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%440 = torch.aten.view %438, %439 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int> -> !torch.vtensor<[2,640,64,64],f32> | |
%_params.unet.down_blocks.1.attentions.0.norm.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.norm.bias : tensor<640xf16> | |
%441 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.norm.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int0_462 = torch.constant.int 0 | |
%442 = torch.aten.unsqueeze %441, %int0_462 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16> | |
%int2_463 = torch.constant.int 2 | |
%443 = torch.aten.unsqueeze %442, %int2_463 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16> | |
%int3_464 = torch.constant.int 3 | |
%444 = torch.aten.unsqueeze %443, %int3_464 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16> | |
%_params.unet.down_blocks.1.attentions.0.norm.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.norm.weight : tensor<640xf16> | |
%445 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.norm.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int0_465 = torch.constant.int 0 | |
%446 = torch.aten.unsqueeze %445, %int0_465 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16> | |
%int2_466 = torch.constant.int 2 | |
%447 = torch.aten.unsqueeze %446, %int2_466 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16> | |
%int3_467 = torch.constant.int 3 | |
%448 = torch.aten.unsqueeze %447, %int3_467 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16> | |
%449 = torch.aten.mul.Tensor %440, %448 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16> -> !torch.vtensor<[2,640,64,64],f32> | |
%int1_468 = torch.constant.int 1 | |
%450 = torch.aten.add.Tensor %449, %444, %int1_468 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f32> | |
%int5_469 = torch.constant.int 5 | |
%451 = torch.prims.convert_element_type %450, %int5_469 : !torch.vtensor<[2,640,64,64],f32>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
%int5_470 = torch.constant.int 5 | |
%452 = torch.prims.convert_element_type %result1_455, %int5_470 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_471 = torch.constant.int 5 | |
%453 = torch.prims.convert_element_type %436, %int5_471 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_472 = torch.constant.int 3 | |
%454 = torch.prim.ListConstruct %int3_472 : (!torch.int) -> !torch.list<int> | |
%455 = torch.prims.squeeze %452, %454 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_473 = torch.constant.int 2 | |
%456 = torch.prim.ListConstruct %int2_473 : (!torch.int) -> !torch.list<int> | |
%457 = torch.prims.squeeze %455, %456 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_474 = torch.constant.int 3 | |
%458 = torch.prim.ListConstruct %int3_474 : (!torch.int) -> !torch.list<int> | |
%459 = torch.prims.squeeze %453, %458 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_475 = torch.constant.int 2 | |
%460 = torch.prim.ListConstruct %int2_475 : (!torch.int) -> !torch.list<int> | |
%461 = torch.prims.squeeze %459, %460 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%462 = torch.aten.detach %457 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%463 = torch.aten.detach %461 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
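// Tokenize the feature map: permute NCHW -> NHWC, view [2,64,64,640] as
// [2,4096,640] (4096 spatial tokens of width 640), then apply proj_in
// (a 640 -> 640 linear) as an [8192,640] x [640,640] matmul.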
%int0_476 = torch.constant.int 0 | |
%int2_477 = torch.constant.int 2 | |
%int3_478 = torch.constant.int 3 | |
%int1_479 = torch.constant.int 1 | |
%464 = torch.prim.ListConstruct %int0_476, %int2_477, %int3_478, %int1_479 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%465 = torch.aten.permute %451, %464 : !torch.vtensor<[2,640,64,64],f16>, !torch.list<int> -> !torch.vtensor<[2,64,64,640],f16> | |
%int2_480 = torch.constant.int 2 | |
%int4096_481 = torch.constant.int 4096 | |
%int640_482 = torch.constant.int 640 | |
%466 = torch.prim.ListConstruct %int2_480, %int4096_481, %int640_482 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%467 = torch.aten.view %465, %466 : !torch.vtensor<[2,64,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.proj_in.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.proj_in.weight : tensor<640x640xf16> | |
%468 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.proj_in.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_483 = torch.constant.int 0 | |
%int1_484 = torch.constant.int 1 | |
%469 = torch.aten.transpose.int %468, %int0_483, %int1_484 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int0_485 = torch.constant.int 0 | |
%470 = torch.aten.clone %467, %int0_485 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int8192 = torch.constant.int 8192 | |
%int640_486 = torch.constant.int 640 | |
%471 = torch.prim.ListConstruct %int8192, %int640_486 : (!torch.int, !torch.int) -> !torch.list<int> | |
%472 = torch.aten._unsafe_view %470, %471 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%473 = torch.aten.mm %472, %469 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_487 = torch.constant.int 2 | |
%int4096_488 = torch.constant.int 4096 | |
%int640_489 = torch.constant.int 640 | |
%474 = torch.prim.ListConstruct %int2_487, %int4096_488, %int640_489 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%475 = torch.aten.view %473, %474 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.proj_in.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.proj_in.bias : tensor<640xf16> | |
%476 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.proj_in.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_490 = torch.constant.int 1 | |
%477 = torch.aten.add.Tensor %475, %476, %int1_490 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
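// transformer_blocks.0 norm1: LayerNorm over the embedding dim (var/mean along
// dim 2 in f32, eps 1e-5) plus the elementwise affine, ahead of attn1.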
%int6_491 = torch.constant.int 6 | |
%478 = torch.prims.convert_element_type %477, %int6_491 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int2_492 = torch.constant.int 2 | |
%479 = torch.prim.ListConstruct %int2_492 : (!torch.int) -> !torch.list<int> | |
%int0_493 = torch.constant.int 0 | |
%true_494 = torch.constant.bool true | |
%result0_495, %result1_496 = torch.aten.var_mean.correction %478, %479, %int0_493, %true_494 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32> | |
%float1.000000e-05_497 = torch.constant.float 1.000000e-05 | |
%int1_498 = torch.constant.int 1 | |
%480 = torch.aten.add.Scalar %result0_495, %float1.000000e-05_497, %int1_498 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32> | |
%481 = torch.aten.rsqrt %480 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32> | |
%int1_499 = torch.constant.int 1 | |
%482 = torch.aten.sub.Tensor %477, %result1_496, %int1_499 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%483 = torch.aten.mul.Tensor %482, %481 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.weight : tensor<640xf16> | |
%484 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%485 = torch.aten.mul.Tensor %483, %484 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.bias : tensor<640xf16> | |
%486 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm1.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_500 = torch.constant.int 1 | |
%487 = torch.aten.add.Tensor %485, %486, %int1_500 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int5_501 = torch.constant.int 5 | |
%488 = torch.prims.convert_element_type %487, %int5_501 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int5_502 = torch.constant.int 5 | |
%489 = torch.prims.convert_element_type %result1_496, %int5_502 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int5_503 = torch.constant.int 5 | |
%490 = torch.prims.convert_element_type %481, %int5_503 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
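// attn1 (self-attention): Q, K and V all project the normalized states %488
// with bias-free 640 -> 640 matmuls.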
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight : tensor<640x640xf16> | |
%491 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_q.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_504 = torch.constant.int 0 | |
%int1_505 = torch.constant.int 1 | |
%492 = torch.aten.transpose.int %491, %int0_504, %int1_505 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_506 = torch.constant.int 8192 | |
%int640_507 = torch.constant.int 640 | |
%493 = torch.prim.ListConstruct %int8192_506, %int640_507 : (!torch.int, !torch.int) -> !torch.list<int> | |
%494 = torch.aten.view %488, %493 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%495 = torch.aten.mm %494, %492 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_508 = torch.constant.int 2 | |
%int4096_509 = torch.constant.int 4096 | |
%int640_510 = torch.constant.int 640 | |
%496 = torch.prim.ListConstruct %int2_508, %int4096_509, %int640_510 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%497 = torch.aten.view %495, %496 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight : tensor<640x640xf16> | |
%498 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_k.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_511 = torch.constant.int 0 | |
%int1_512 = torch.constant.int 1 | |
%499 = torch.aten.transpose.int %498, %int0_511, %int1_512 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_513 = torch.constant.int 8192 | |
%int640_514 = torch.constant.int 640 | |
%500 = torch.prim.ListConstruct %int8192_513, %int640_514 : (!torch.int, !torch.int) -> !torch.list<int> | |
%501 = torch.aten.view %488, %500 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%502 = torch.aten.mm %501, %499 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_515 = torch.constant.int 2 | |
%int4096_516 = torch.constant.int 4096 | |
%int640_517 = torch.constant.int 640 | |
%503 = torch.prim.ListConstruct %int2_515, %int4096_516, %int640_517 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%504 = torch.aten.view %502, %503 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight : tensor<640x640xf16> | |
%505 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_v.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_518 = torch.constant.int 0 | |
%int1_519 = torch.constant.int 1 | |
%506 = torch.aten.transpose.int %505, %int0_518, %int1_519 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_520 = torch.constant.int 8192 | |
%int640_521 = torch.constant.int 640 | |
%507 = torch.prim.ListConstruct %int8192_520, %int640_521 : (!torch.int, !torch.int) -> !torch.list<int> | |
%508 = torch.aten.view %488, %507 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%509 = torch.aten.mm %508, %506 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_522 = torch.constant.int 2 | |
%int4096_523 = torch.constant.int 4096 | |
%int640_524 = torch.constant.int 640 | |
%510 = torch.prim.ListConstruct %int2_522, %int4096_523, %int640_524 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%511 = torch.aten.view %509, %510 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
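// Split heads: view [2,4096,640] as [2,4096,10,64] and transpose to
// [2,10,4096,64] (10 heads of width 64), then call the CPU flash-attention
// kernel (dropout 0.0, non-causal). Its second result is the logsumexp,
// which goes unused here.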
%int2_525 = torch.constant.int 2 | |
%int-1_526 = torch.constant.int -1 | |
%int10_527 = torch.constant.int 10 | |
%int64_528 = torch.constant.int 64 | |
%512 = torch.prim.ListConstruct %int2_525, %int-1_526, %int10_527, %int64_528 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%513 = torch.aten.view %497, %512 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_529 = torch.constant.int 1 | |
%int2_530 = torch.constant.int 2 | |
%514 = torch.aten.transpose.int %513, %int1_529, %int2_530 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%int2_531 = torch.constant.int 2 | |
%int-1_532 = torch.constant.int -1 | |
%int10_533 = torch.constant.int 10 | |
%int64_534 = torch.constant.int 64 | |
%515 = torch.prim.ListConstruct %int2_531, %int-1_532, %int10_533, %int64_534 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%516 = torch.aten.view %504, %515 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_535 = torch.constant.int 1 | |
%int2_536 = torch.constant.int 2 | |
%517 = torch.aten.transpose.int %516, %int1_535, %int2_536 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%int2_537 = torch.constant.int 2 | |
%int-1_538 = torch.constant.int -1 | |
%int10_539 = torch.constant.int 10 | |
%int64_540 = torch.constant.int 64 | |
%518 = torch.prim.ListConstruct %int2_537, %int-1_538, %int10_539, %int64_540 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%519 = torch.aten.view %511, %518 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_541 = torch.constant.int 1 | |
%int2_542 = torch.constant.int 2 | |
%520 = torch.aten.transpose.int %519, %int1_541, %int2_542 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%float0.000000e00 = torch.constant.float 0.000000e+00 | |
%false_543 = torch.constant.bool false | |
%none_544 = torch.constant.none | |
%none_545 = torch.constant.none | |
%521:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%514, %517, %520, %float0.000000e00, %false_543, %none_544, %none_545) : (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096],f32>) | |
%522 = torch.aten.detach %521#0 : !torch.vtensor<[2,10,4096,64],f16> -> !torch.vtensor<[2,10,4096,64],f16> | |
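// Merge heads (transpose back and view as [2,4096,640]), apply attn1.to_out.0
// in f32 with its bias, divide by 1.0 (a no-op), and add the residual %477.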
%int1_546 = torch.constant.int 1 | |
%int2_547 = torch.constant.int 2 | |
%523 = torch.aten.transpose.int %521#0, %int1_546, %int2_547 : !torch.vtensor<[2,10,4096,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,4096,10,64],f16> | |
%int2_548 = torch.constant.int 2 | |
%int-1_549 = torch.constant.int -1 | |
%int640_550 = torch.constant.int 640 | |
%524 = torch.prim.ListConstruct %int2_548, %int-1_549, %int640_550 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%525 = torch.aten.view %523, %524 : !torch.vtensor<[2,4096,10,64],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int8192_551 = torch.constant.int 8192 | |
%int640_552 = torch.constant.int 640 | |
%526 = torch.prim.ListConstruct %int8192_551, %int640_552 : (!torch.int, !torch.int) -> !torch.list<int> | |
%527 = torch.aten.view %525, %526 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight : tensor<640x640xf16> | |
%528 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_553 = torch.constant.int 0 | |
%int1_554 = torch.constant.int 1 | |
%529 = torch.aten.transpose.int %528, %int0_553, %int1_554 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias : tensor<640xf16> | |
%530 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_555 = torch.constant.int 6 | |
%531 = torch.prims.convert_element_type %530, %int6_555 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_556 = torch.constant.int 6 | |
%532 = torch.prims.convert_element_type %527, %int6_556 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_557 = torch.constant.int 6 | |
%533 = torch.prims.convert_element_type %529, %int6_557 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32> | |
%534 = torch.aten.mm %532, %533 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_558 = torch.constant.int 1 | |
%535 = torch.aten.mul.Scalar %534, %int1_558 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_559 = torch.constant.int 1 | |
%536 = torch.aten.mul.Scalar %531, %int1_559 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_560 = torch.constant.int 1 | |
%537 = torch.aten.add.Tensor %535, %536, %int1_560 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_561 = torch.constant.int 5 | |
%538 = torch.prims.convert_element_type %537, %int5_561 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_562 = torch.constant.int 2 | |
%int4096_563 = torch.constant.int 4096 | |
%int640_564 = torch.constant.int 640 | |
%539 = torch.prim.ListConstruct %int2_562, %int4096_563, %int640_564 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%540 = torch.aten.view %538, %539 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%none_565 = torch.constant.none | |
%541 = torch.aten.clone %540, %none_565 : !torch.vtensor<[2,4096,640],f16>, !torch.none -> !torch.vtensor<[2,4096,640],f16> | |
%float1.000000e00_566 = torch.constant.float 1.000000e+00 | |
%542 = torch.aten.div.Scalar %541, %float1.000000e00_566 : !torch.vtensor<[2,4096,640],f16>, !torch.float -> !torch.vtensor<[2,4096,640],f16> | |
%int1_567 = torch.constant.int 1 | |
%543 = torch.aten.add.Tensor %542, %477, %int1_567 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
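// norm2 LayerNorm on the attn1 output, then attn2 (cross-attention).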
%int6_568 = torch.constant.int 6 | |
%544 = torch.prims.convert_element_type %543, %int6_568 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int2_569 = torch.constant.int 2 | |
%545 = torch.prim.ListConstruct %int2_569 : (!torch.int) -> !torch.list<int> | |
%int0_570 = torch.constant.int 0 | |
%true_571 = torch.constant.bool true | |
%result0_572, %result1_573 = torch.aten.var_mean.correction %544, %545, %int0_570, %true_571 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32> | |
%float1.000000e-05_574 = torch.constant.float 1.000000e-05 | |
%int1_575 = torch.constant.int 1 | |
%546 = torch.aten.add.Scalar %result0_572, %float1.000000e-05_574, %int1_575 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32> | |
%547 = torch.aten.rsqrt %546 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32> | |
%int1_576 = torch.constant.int 1 | |
%548 = torch.aten.sub.Tensor %543, %result1_573, %int1_576 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%549 = torch.aten.mul.Tensor %548, %547 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.weight : tensor<640xf16> | |
%550 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%551 = torch.aten.mul.Tensor %549, %550 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.bias : tensor<640xf16> | |
%552 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_577 = torch.constant.int 1 | |
%553 = torch.aten.add.Tensor %551, %552, %int1_577 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int5_578 = torch.constant.int 5 | |
%554 = torch.prims.convert_element_type %553, %int5_578 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int5_579 = torch.constant.int 5 | |
%555 = torch.prims.convert_element_type %result1_573, %int5_579 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int5_580 = torch.constant.int 5 | |
%556 = torch.prims.convert_element_type %547, %int5_580 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight : tensor<640x640xf16> | |
%557 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_581 = torch.constant.int 0 | |
%int1_582 = torch.constant.int 1 | |
%558 = torch.aten.transpose.int %557, %int0_581, %int1_582 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_583 = torch.constant.int 8192 | |
%int640_584 = torch.constant.int 640 | |
%559 = torch.prim.ListConstruct %int8192_583, %int640_584 : (!torch.int, !torch.int) -> !torch.list<int> | |
%560 = torch.aten.view %554, %559 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%561 = torch.aten.mm %560, %558 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_585 = torch.constant.int 2 | |
%int4096_586 = torch.constant.int 4096 | |
%int640_587 = torch.constant.int 640 | |
%562 = torch.prim.ListConstruct %int2_585, %int4096_586, %int640_587 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%563 = torch.aten.view %561, %562 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
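// attn2 keys/values come from the encoder hidden states %arg1 ([2,64,2048],
// presumably the text-encoder embeddings), projected 2048 -> 640 into 64
// key/value tokens; the queries come from the normalized hidden states.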
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight : tensor<640x2048xf16> | |
%564 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_k.weight : tensor<640x2048xf16> -> !torch.vtensor<[640,2048],f16> | |
%int0_588 = torch.constant.int 0 | |
%int1_589 = torch.constant.int 1 | |
%565 = torch.aten.transpose.int %564, %int0_588, %int1_589 : !torch.vtensor<[640,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,640],f16> | |
%int128_590 = torch.constant.int 128 | |
%int2048 = torch.constant.int 2048 | |
%566 = torch.prim.ListConstruct %int128_590, %int2048 : (!torch.int, !torch.int) -> !torch.list<int> | |
%567 = torch.aten.view %arg1, %566 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%568 = torch.aten.mm %567, %565 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,640],f16> -> !torch.vtensor<[128,640],f16> | |
%int2_591 = torch.constant.int 2 | |
%int64_592 = torch.constant.int 64 | |
%int640_593 = torch.constant.int 640 | |
%569 = torch.prim.ListConstruct %int2_591, %int64_592, %int640_593 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%570 = torch.aten.view %568, %569 : !torch.vtensor<[128,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight : tensor<640x2048xf16> | |
%571 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_v.weight : tensor<640x2048xf16> -> !torch.vtensor<[640,2048],f16> | |
%int0_594 = torch.constant.int 0 | |
%int1_595 = torch.constant.int 1 | |
%572 = torch.aten.transpose.int %571, %int0_594, %int1_595 : !torch.vtensor<[640,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,640],f16> | |
%int128_596 = torch.constant.int 128 | |
%int2048_597 = torch.constant.int 2048 | |
%573 = torch.prim.ListConstruct %int128_596, %int2048_597 : (!torch.int, !torch.int) -> !torch.list<int> | |
%574 = torch.aten.view %arg1, %573 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%575 = torch.aten.mm %574, %572 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,640],f16> -> !torch.vtensor<[128,640],f16> | |
%int2_598 = torch.constant.int 2 | |
%int64_599 = torch.constant.int 64 | |
%int640_600 = torch.constant.int 640 | |
%576 = torch.prim.ListConstruct %int2_598, %int64_599, %int640_600 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%577 = torch.aten.view %575, %576 : !torch.vtensor<[128,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,640],f16> | |
%int2_601 = torch.constant.int 2 | |
%int-1_602 = torch.constant.int -1 | |
%int10_603 = torch.constant.int 10 | |
%int64_604 = torch.constant.int 64 | |
%578 = torch.prim.ListConstruct %int2_601, %int-1_602, %int10_603, %int64_604 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%579 = torch.aten.view %563, %578 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_605 = torch.constant.int 1 | |
%int2_606 = torch.constant.int 2 | |
%580 = torch.aten.transpose.int %579, %int1_605, %int2_606 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%int2_607 = torch.constant.int 2 | |
%int-1_608 = torch.constant.int -1 | |
%int10_609 = torch.constant.int 10 | |
%int64_610 = torch.constant.int 64 | |
%581 = torch.prim.ListConstruct %int2_607, %int-1_608, %int10_609, %int64_610 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%582 = torch.aten.view %570, %581 : !torch.vtensor<[2,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,10,64],f16> | |
%int1_611 = torch.constant.int 1 | |
%int2_612 = torch.constant.int 2 | |
%583 = torch.aten.transpose.int %582, %int1_611, %int2_612 : !torch.vtensor<[2,64,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,64,64],f16> | |
%int2_613 = torch.constant.int 2 | |
%int-1_614 = torch.constant.int -1 | |
%int10_615 = torch.constant.int 10 | |
%int64_616 = torch.constant.int 64 | |
%584 = torch.prim.ListConstruct %int2_613, %int-1_614, %int10_615, %int64_616 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%585 = torch.aten.view %577, %584 : !torch.vtensor<[2,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,10,64],f16> | |
%int1_617 = torch.constant.int 1 | |
%int2_618 = torch.constant.int 2 | |
%586 = torch.aten.transpose.int %585, %int1_617, %int2_618 : !torch.vtensor<[2,64,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,64,64],f16> | |
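// Cross-attention SDPA: 4096 queries per head attend over the 64 key/value
// tokens ([2,10,4096,64] x [2,10,64,64]).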
%float0.000000e00_619 = torch.constant.float 0.000000e+00 | |
%false_620 = torch.constant.bool false | |
%none_621 = torch.constant.none | |
%none_622 = torch.constant.none | |
%587:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%580, %583, %586, %float0.000000e00_619, %false_620, %none_621, %none_622) : (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,64,64],f16>, !torch.vtensor<[2,10,64,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096],f32>) | |
%588 = torch.aten.detach %587#0 : !torch.vtensor<[2,10,4096,64],f16> -> !torch.vtensor<[2,10,4096,64],f16> | |
%int1_623 = torch.constant.int 1 | |
%int2_624 = torch.constant.int 2 | |
%589 = torch.aten.transpose.int %587#0, %int1_623, %int2_624 : !torch.vtensor<[2,10,4096,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,4096,10,64],f16> | |
%int2_625 = torch.constant.int 2 | |
%int-1_626 = torch.constant.int -1 | |
%int640_627 = torch.constant.int 640 | |
%590 = torch.prim.ListConstruct %int2_625, %int-1_626, %int640_627 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%591 = torch.aten.view %589, %590 : !torch.vtensor<[2,4096,10,64],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int8192_628 = torch.constant.int 8192 | |
%int640_629 = torch.constant.int 640 | |
%592 = torch.prim.ListConstruct %int8192_628, %int640_629 : (!torch.int, !torch.int) -> !torch.list<int> | |
%593 = torch.aten.view %591, %592 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight : tensor<640x640xf16> | |
%594 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_630 = torch.constant.int 0 | |
%int1_631 = torch.constant.int 1 | |
%595 = torch.aten.transpose.int %594, %int0_630, %int1_631 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias : tensor<640xf16> | |
%596 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_632 = torch.constant.int 6 | |
%597 = torch.prims.convert_element_type %596, %int6_632 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_633 = torch.constant.int 6 | |
%598 = torch.prims.convert_element_type %593, %int6_633 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_634 = torch.constant.int 6 | |
%599 = torch.prims.convert_element_type %595, %int6_634 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32> | |
%600 = torch.aten.mm %598, %599 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_635 = torch.constant.int 1 | |
%601 = torch.aten.mul.Scalar %600, %int1_635 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_636 = torch.constant.int 1 | |
%602 = torch.aten.mul.Scalar %597, %int1_636 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_637 = torch.constant.int 1 | |
%603 = torch.aten.add.Tensor %601, %602, %int1_637 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_638 = torch.constant.int 5 | |
%604 = torch.prims.convert_element_type %603, %int5_638 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_639 = torch.constant.int 2 | |
%int4096_640 = torch.constant.int 4096 | |
%int640_641 = torch.constant.int 640 | |
%605 = torch.prim.ListConstruct %int2_639, %int4096_640, %int640_641 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%606 = torch.aten.view %604, %605 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%none_642 = torch.constant.none | |
%607 = torch.aten.clone %606, %none_642 : !torch.vtensor<[2,4096,640],f16>, !torch.none -> !torch.vtensor<[2,4096,640],f16> | |
%float1.000000e00_643 = torch.constant.float 1.000000e+00 | |
%608 = torch.aten.div.Scalar %607, %float1.000000e00_643 : !torch.vtensor<[2,4096,640],f16>, !torch.float -> !torch.vtensor<[2,4096,640],f16> | |
%int1_644 = torch.constant.int 1 | |
%609 = torch.aten.add.Tensor %608, %543, %int1_644 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
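// %609 closes the cross-attention residual against the earlier hidden states (%543).
// Next, LayerNorm (norm3) decomposed into var_mean.correction (eps = 1e-5), rsqrt,
// subtract, and multiply, with the f16 affine weight/bias applied in f32.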
%int6_645 = torch.constant.int 6 | |
%610 = torch.prims.convert_element_type %609, %int6_645 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int2_646 = torch.constant.int 2 | |
%611 = torch.prim.ListConstruct %int2_646 : (!torch.int) -> !torch.list<int> | |
%int0_647 = torch.constant.int 0 | |
%true_648 = torch.constant.bool true | |
%result0_649, %result1_650 = torch.aten.var_mean.correction %610, %611, %int0_647, %true_648 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32> | |
%float1.000000e-05_651 = torch.constant.float 1.000000e-05 | |
%int1_652 = torch.constant.int 1 | |
%612 = torch.aten.add.Scalar %result0_649, %float1.000000e-05_651, %int1_652 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32> | |
%613 = torch.aten.rsqrt %612 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32> | |
%int1_653 = torch.constant.int 1 | |
%614 = torch.aten.sub.Tensor %609, %result1_650, %int1_653 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%615 = torch.aten.mul.Tensor %614, %613 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.weight : tensor<640xf16> | |
%616 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%617 = torch.aten.mul.Tensor %615, %616 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.bias : tensor<640xf16> | |
%618 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.norm3.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_654 = torch.constant.int 1 | |
%619 = torch.aten.add.Tensor %617, %618, %int1_654 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int5_655 = torch.constant.int 5 | |
%620 = torch.prims.convert_element_type %619, %int5_655 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int5_656 = torch.constant.int 5 | |
%621 = torch.prims.convert_element_type %result1_650, %int5_656 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int5_657 = torch.constant.int 5 | |
%622 = torch.prims.convert_element_type %613, %int5_657 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
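// Feed-forward, first half: ff.net.0.proj expands 640 -> 5120 with f32 accumulation,
// producing the two 2560-wide GEGLU branches sliced out below.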
%int8192_658 = torch.constant.int 8192 | |
%int640_659 = torch.constant.int 640 | |
%623 = torch.prim.ListConstruct %int8192_658, %int640_659 : (!torch.int, !torch.int) -> !torch.list<int> | |
%624 = torch.aten.view %620, %623 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight : tensor<5120x640xf16> | |
%625 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight : tensor<5120x640xf16> -> !torch.vtensor<[5120,640],f16> | |
%int0_660 = torch.constant.int 0 | |
%int1_661 = torch.constant.int 1 | |
%626 = torch.aten.transpose.int %625, %int0_660, %int1_661 : !torch.vtensor<[5120,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,5120],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias : tensor<5120xf16> | |
%627 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias : tensor<5120xf16> -> !torch.vtensor<[5120],f16> | |
%int6_662 = torch.constant.int 6 | |
%628 = torch.prims.convert_element_type %627, %int6_662 : !torch.vtensor<[5120],f16>, !torch.int -> !torch.vtensor<[5120],f32> | |
%int6_663 = torch.constant.int 6 | |
%629 = torch.prims.convert_element_type %624, %int6_663 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_664 = torch.constant.int 6 | |
%630 = torch.prims.convert_element_type %626, %int6_664 : !torch.vtensor<[640,5120],f16>, !torch.int -> !torch.vtensor<[640,5120],f32> | |
%631 = torch.aten.mm %629, %630 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,5120],f32> -> !torch.vtensor<[8192,5120],f32> | |
%int1_665 = torch.constant.int 1 | |
%632 = torch.aten.mul.Scalar %631, %int1_665 : !torch.vtensor<[8192,5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f32> | |
%int1_666 = torch.constant.int 1 | |
%633 = torch.aten.mul.Scalar %628, %int1_666 : !torch.vtensor<[5120],f32>, !torch.int -> !torch.vtensor<[5120],f32> | |
%int1_667 = torch.constant.int 1 | |
%634 = torch.aten.add.Tensor %632, %633, %int1_667 : !torch.vtensor<[8192,5120],f32>, !torch.vtensor<[5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f32> | |
%int5_668 = torch.constant.int 5 | |
%635 = torch.prims.convert_element_type %634, %int5_668 : !torch.vtensor<[8192,5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f16> | |
%int2_669 = torch.constant.int 2 | |
%int4096_670 = torch.constant.int 4096 | |
%int5120 = torch.constant.int 5120 | |
%636 = torch.prim.ListConstruct %int2_669, %int4096_670, %int5120 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%637 = torch.aten.view %635, %636 : !torch.vtensor<[8192,5120],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,5120],f16> | |
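// GEGLU gate: the second 2560-wide half passes through exact (erf) GELU
// (approximate = "none") and elementwise-multiplies the first half.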
%int-1_671 = torch.constant.int -1 | |
%int0_672 = torch.constant.int 0 | |
%int2560 = torch.constant.int 2560 | |
%int1_673 = torch.constant.int 1 | |
%638 = torch.aten.slice.Tensor %637, %int-1_671, %int0_672, %int2560, %int1_673 : !torch.vtensor<[2,4096,5120],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,4096,2560],f16> | |
%int-1_674 = torch.constant.int -1 | |
%int2560_675 = torch.constant.int 2560 | |
%int5120_676 = torch.constant.int 5120 | |
%int1_677 = torch.constant.int 1 | |
%639 = torch.aten.slice.Tensor %637, %int-1_674, %int2560_675, %int5120_676, %int1_677 : !torch.vtensor<[2,4096,5120],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,4096,2560],f16> | |
%str = torch.constant.str "none" | |
%640 = torch.aten.gelu %639, %str : !torch.vtensor<[2,4096,2560],f16>, !torch.str -> !torch.vtensor<[2,4096,2560],f16> | |
%641 = torch.aten.mul.Tensor %638, %640 : !torch.vtensor<[2,4096,2560],f16>, !torch.vtensor<[2,4096,2560],f16> -> !torch.vtensor<[2,4096,2560],f16> | |
%none_678 = torch.constant.none | |
%642 = torch.aten.clone %641, %none_678 : !torch.vtensor<[2,4096,2560],f16>, !torch.none -> !torch.vtensor<[2,4096,2560],f16> | |
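// Feed-forward, second half: ff.net.2 contracts 2560 -> 640, again accumulating
// in f32 before the cast back to f16.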
%int8192_679 = torch.constant.int 8192 | |
%int2560_680 = torch.constant.int 2560 | |
%643 = torch.prim.ListConstruct %int8192_679, %int2560_680 : (!torch.int, !torch.int) -> !torch.list<int> | |
%644 = torch.aten.view %642, %643 : !torch.vtensor<[2,4096,2560],f16>, !torch.list<int> -> !torch.vtensor<[8192,2560],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight : tensor<640x2560xf16> | |
%645 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight : tensor<640x2560xf16> -> !torch.vtensor<[640,2560],f16> | |
%int0_681 = torch.constant.int 0 | |
%int1_682 = torch.constant.int 1 | |
%646 = torch.aten.transpose.int %645, %int0_681, %int1_682 : !torch.vtensor<[640,2560],f16>, !torch.int, !torch.int -> !torch.vtensor<[2560,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias : tensor<640xf16> | |
%647 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_683 = torch.constant.int 6 | |
%648 = torch.prims.convert_element_type %647, %int6_683 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_684 = torch.constant.int 6 | |
%649 = torch.prims.convert_element_type %644, %int6_684 : !torch.vtensor<[8192,2560],f16>, !torch.int -> !torch.vtensor<[8192,2560],f32> | |
%int6_685 = torch.constant.int 6 | |
%650 = torch.prims.convert_element_type %646, %int6_685 : !torch.vtensor<[2560,640],f16>, !torch.int -> !torch.vtensor<[2560,640],f32> | |
%651 = torch.aten.mm %649, %650 : !torch.vtensor<[8192,2560],f32>, !torch.vtensor<[2560,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_686 = torch.constant.int 1 | |
%652 = torch.aten.mul.Scalar %651, %int1_686 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_687 = torch.constant.int 1 | |
%653 = torch.aten.mul.Scalar %648, %int1_687 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_688 = torch.constant.int 1 | |
%654 = torch.aten.add.Tensor %652, %653, %int1_688 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_689 = torch.constant.int 5 | |
%655 = torch.prims.convert_element_type %654, %int5_689 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_690 = torch.constant.int 2 | |
%int4096_691 = torch.constant.int 4096 | |
%int640_692 = torch.constant.int 640 | |
%656 = torch.prim.ListConstruct %int2_690, %int4096_691, %int640_692 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%657 = torch.aten.view %655, %656 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int1_693 = torch.constant.int 1 | |
%658 = torch.aten.add.Tensor %657, %609, %int1_693 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
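// %658 closes transformer_blocks.0 (feed-forward residual). transformer_blocks.1
// begins with the same decomposed LayerNorm pattern (norm1).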
%int6_694 = torch.constant.int 6 | |
%659 = torch.prims.convert_element_type %658, %int6_694 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int2_695 = torch.constant.int 2 | |
%660 = torch.prim.ListConstruct %int2_695 : (!torch.int) -> !torch.list<int> | |
%int0_696 = torch.constant.int 0 | |
%true_697 = torch.constant.bool true | |
%result0_698, %result1_699 = torch.aten.var_mean.correction %659, %660, %int0_696, %true_697 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32> | |
%float1.000000e-05_700 = torch.constant.float 1.000000e-05 | |
%int1_701 = torch.constant.int 1 | |
%661 = torch.aten.add.Scalar %result0_698, %float1.000000e-05_700, %int1_701 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32> | |
%662 = torch.aten.rsqrt %661 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32> | |
%int1_702 = torch.constant.int 1 | |
%663 = torch.aten.sub.Tensor %658, %result1_699, %int1_702 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%664 = torch.aten.mul.Tensor %663, %662 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.weight : tensor<640xf16> | |
%665 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%666 = torch.aten.mul.Tensor %664, %665 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.bias : tensor<640xf16> | |
%667 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm1.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_703 = torch.constant.int 1 | |
%668 = torch.aten.add.Tensor %666, %667, %int1_703 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int5_704 = torch.constant.int 5 | |
%669 = torch.prims.convert_element_type %668, %int5_704 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int5_705 = torch.constant.int 5 | |
%670 = torch.prims.convert_element_type %result1_699, %int5_705 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int5_706 = torch.constant.int 5 | |
%671 = torch.prims.convert_element_type %662, %int5_706 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
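// Self-attention (attn1) Q/K/V: three bias-free f16 projections of the same
// normalized hidden states (%669), each flattened to [8192,640] for the matmul.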
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight : tensor<640x640xf16> | |
%672 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_q.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_707 = torch.constant.int 0 | |
%int1_708 = torch.constant.int 1 | |
%673 = torch.aten.transpose.int %672, %int0_707, %int1_708 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_709 = torch.constant.int 8192 | |
%int640_710 = torch.constant.int 640 | |
%674 = torch.prim.ListConstruct %int8192_709, %int640_710 : (!torch.int, !torch.int) -> !torch.list<int> | |
%675 = torch.aten.view %669, %674 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%676 = torch.aten.mm %675, %673 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_711 = torch.constant.int 2 | |
%int4096_712 = torch.constant.int 4096 | |
%int640_713 = torch.constant.int 640 | |
%677 = torch.prim.ListConstruct %int2_711, %int4096_712, %int640_713 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%678 = torch.aten.view %676, %677 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight : tensor<640x640xf16> | |
%679 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_k.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_714 = torch.constant.int 0 | |
%int1_715 = torch.constant.int 1 | |
%680 = torch.aten.transpose.int %679, %int0_714, %int1_715 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_716 = torch.constant.int 8192 | |
%int640_717 = torch.constant.int 640 | |
%681 = torch.prim.ListConstruct %int8192_716, %int640_717 : (!torch.int, !torch.int) -> !torch.list<int> | |
%682 = torch.aten.view %669, %681 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%683 = torch.aten.mm %682, %680 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_718 = torch.constant.int 2 | |
%int4096_719 = torch.constant.int 4096 | |
%int640_720 = torch.constant.int 640 | |
%684 = torch.prim.ListConstruct %int2_718, %int4096_719, %int640_720 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%685 = torch.aten.view %683, %684 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight : tensor<640x640xf16> | |
%686 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_v.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_721 = torch.constant.int 0 | |
%int1_722 = torch.constant.int 1 | |
%687 = torch.aten.transpose.int %686, %int0_721, %int1_722 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_723 = torch.constant.int 8192 | |
%int640_724 = torch.constant.int 640 | |
%688 = torch.prim.ListConstruct %int8192_723, %int640_724 : (!torch.int, !torch.int) -> !torch.list<int> | |
%689 = torch.aten.view %669, %688 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%690 = torch.aten.mm %689, %687 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_725 = torch.constant.int 2 | |
%int4096_726 = torch.constant.int 4096 | |
%int640_727 = torch.constant.int 640 | |
%691 = torch.prim.ListConstruct %int2_725, %int4096_726, %int640_727 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%692 = torch.aten.view %690, %691 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
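// Multi-head split: [2,4096,640] -> [2,4096,10,64] -> transpose to [2,10,4096,64],
// i.e. 10 heads of dimension 64.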
%int2_728 = torch.constant.int 2 | |
%int-1_729 = torch.constant.int -1 | |
%int10_730 = torch.constant.int 10 | |
%int64_731 = torch.constant.int 64 | |
%693 = torch.prim.ListConstruct %int2_728, %int-1_729, %int10_730, %int64_731 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%694 = torch.aten.view %678, %693 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_732 = torch.constant.int 1 | |
%int2_733 = torch.constant.int 2 | |
%695 = torch.aten.transpose.int %694, %int1_732, %int2_733 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%int2_734 = torch.constant.int 2 | |
%int-1_735 = torch.constant.int -1 | |
%int10_736 = torch.constant.int 10 | |
%int64_737 = torch.constant.int 64 | |
%696 = torch.prim.ListConstruct %int2_734, %int-1_735, %int10_736, %int64_737 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%697 = torch.aten.view %685, %696 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_738 = torch.constant.int 1 | |
%int2_739 = torch.constant.int 2 | |
%698 = torch.aten.transpose.int %697, %int1_738, %int2_739 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%int2_740 = torch.constant.int 2 | |
%int-1_741 = torch.constant.int -1 | |
%int10_742 = torch.constant.int 10 | |
%int64_743 = torch.constant.int 64 | |
%699 = torch.prim.ListConstruct %int2_740, %int-1_741, %int10_742, %int64_743 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%700 = torch.aten.view %692, %699 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_744 = torch.constant.int 1 | |
%int2_745 = torch.constant.int 2 | |
%701 = torch.aten.transpose.int %700, %int1_744, %int2_745 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
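// Flash attention (CPU variant): dropout_p = 0.0, is_causal = false; the two none
// operands appear to be the optional attention mask and scale, left at defaults.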
%float0.000000e00_746 = torch.constant.float 0.000000e+00 | |
%false_747 = torch.constant.bool false | |
%none_748 = torch.constant.none | |
%none_749 = torch.constant.none | |
%702:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%695, %698, %701, %float0.000000e00_746, %false_747, %none_748, %none_749) : (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096],f32>) | |
%703 = torch.aten.detach %702#0 : !torch.vtensor<[2,10,4096,64],f16> -> !torch.vtensor<[2,10,4096,64],f16> | |
%int1_750 = torch.constant.int 1 | |
%int2_751 = torch.constant.int 2 | |
%704 = torch.aten.transpose.int %702#0, %int1_750, %int2_751 : !torch.vtensor<[2,10,4096,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,4096,10,64],f16> | |
%int2_752 = torch.constant.int 2 | |
%int-1_753 = torch.constant.int -1 | |
%int640_754 = torch.constant.int 640 | |
%705 = torch.prim.ListConstruct %int2_752, %int-1_753, %int640_754 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%706 = torch.aten.view %704, %705 : !torch.vtensor<[2,4096,10,64],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int8192_755 = torch.constant.int 8192 | |
%int640_756 = torch.constant.int 640 | |
%707 = torch.prim.ListConstruct %int8192_755, %int640_756 : (!torch.int, !torch.int) -> !torch.list<int> | |
%708 = torch.aten.view %706, %707 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
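// attn1 output projection (to_out.0) with f32 accumulation, followed by a divide
// by 1.0 (the attention rescale factor, a no-op here) and the residual add.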
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight : tensor<640x640xf16> | |
%709 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_757 = torch.constant.int 0 | |
%int1_758 = torch.constant.int 1 | |
%710 = torch.aten.transpose.int %709, %int0_757, %int1_758 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias : tensor<640xf16> | |
%711 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_759 = torch.constant.int 6 | |
%712 = torch.prims.convert_element_type %711, %int6_759 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_760 = torch.constant.int 6 | |
%713 = torch.prims.convert_element_type %708, %int6_760 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_761 = torch.constant.int 6 | |
%714 = torch.prims.convert_element_type %710, %int6_761 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32> | |
%715 = torch.aten.mm %713, %714 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_762 = torch.constant.int 1 | |
%716 = torch.aten.mul.Scalar %715, %int1_762 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_763 = torch.constant.int 1 | |
%717 = torch.aten.mul.Scalar %712, %int1_763 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_764 = torch.constant.int 1 | |
%718 = torch.aten.add.Tensor %716, %717, %int1_764 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_765 = torch.constant.int 5 | |
%719 = torch.prims.convert_element_type %718, %int5_765 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_766 = torch.constant.int 2 | |
%int4096_767 = torch.constant.int 4096 | |
%int640_768 = torch.constant.int 640 | |
%720 = torch.prim.ListConstruct %int2_766, %int4096_767, %int640_768 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%721 = torch.aten.view %719, %720 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%none_769 = torch.constant.none | |
%722 = torch.aten.clone %721, %none_769 : !torch.vtensor<[2,4096,640],f16>, !torch.none -> !torch.vtensor<[2,4096,640],f16> | |
%float1.000000e00_770 = torch.constant.float 1.000000e+00 | |
%723 = torch.aten.div.Scalar %722, %float1.000000e00_770 : !torch.vtensor<[2,4096,640],f16>, !torch.float -> !torch.vtensor<[2,4096,640],f16> | |
%int1_771 = torch.constant.int 1 | |
%724 = torch.aten.add.Tensor %723, %658, %int1_771 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
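// LayerNorm (norm2) ahead of cross-attention, using the same var_mean decomposition.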
%int6_772 = torch.constant.int 6 | |
%725 = torch.prims.convert_element_type %724, %int6_772 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int2_773 = torch.constant.int 2 | |
%726 = torch.prim.ListConstruct %int2_773 : (!torch.int) -> !torch.list<int> | |
%int0_774 = torch.constant.int 0 | |
%true_775 = torch.constant.bool true | |
%result0_776, %result1_777 = torch.aten.var_mean.correction %725, %726, %int0_774, %true_775 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32> | |
%float1.000000e-05_778 = torch.constant.float 1.000000e-05 | |
%int1_779 = torch.constant.int 1 | |
%727 = torch.aten.add.Scalar %result0_776, %float1.000000e-05_778, %int1_779 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32> | |
%728 = torch.aten.rsqrt %727 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32> | |
%int1_780 = torch.constant.int 1 | |
%729 = torch.aten.sub.Tensor %724, %result1_777, %int1_780 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%730 = torch.aten.mul.Tensor %729, %728 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.weight : tensor<640xf16> | |
%731 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%732 = torch.aten.mul.Tensor %730, %731 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.bias : tensor<640xf16> | |
%733 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_781 = torch.constant.int 1 | |
%734 = torch.aten.add.Tensor %732, %733, %int1_781 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int5_782 = torch.constant.int 5 | |
%735 = torch.prims.convert_element_type %734, %int5_782 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int5_783 = torch.constant.int 5 | |
%736 = torch.prims.convert_element_type %result1_777, %int5_783 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int5_784 = torch.constant.int 5 | |
%737 = torch.prims.convert_element_type %728, %int5_784 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
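// Cross-attention (attn2): Q is projected from the 4096 image tokens (%735); K and V
// are projected 2048 -> 640 from %arg1 ([2,64,2048]), presumably the text-encoder
// hidden states with 64 conditioning tokens.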
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight : tensor<640x640xf16> | |
%738 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_785 = torch.constant.int 0 | |
%int1_786 = torch.constant.int 1 | |
%739 = torch.aten.transpose.int %738, %int0_785, %int1_786 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_787 = torch.constant.int 8192 | |
%int640_788 = torch.constant.int 640 | |
%740 = torch.prim.ListConstruct %int8192_787, %int640_788 : (!torch.int, !torch.int) -> !torch.list<int> | |
%741 = torch.aten.view %735, %740 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%742 = torch.aten.mm %741, %739 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_789 = torch.constant.int 2 | |
%int4096_790 = torch.constant.int 4096 | |
%int640_791 = torch.constant.int 640 | |
%743 = torch.prim.ListConstruct %int2_789, %int4096_790, %int640_791 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%744 = torch.aten.view %742, %743 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight : tensor<640x2048xf16> | |
%745 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_k.weight : tensor<640x2048xf16> -> !torch.vtensor<[640,2048],f16> | |
%int0_792 = torch.constant.int 0 | |
%int1_793 = torch.constant.int 1 | |
%746 = torch.aten.transpose.int %745, %int0_792, %int1_793 : !torch.vtensor<[640,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,640],f16> | |
%int128_794 = torch.constant.int 128 | |
%int2048_795 = torch.constant.int 2048 | |
%747 = torch.prim.ListConstruct %int128_794, %int2048_795 : (!torch.int, !torch.int) -> !torch.list<int> | |
%748 = torch.aten.view %arg1, %747 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%749 = torch.aten.mm %748, %746 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,640],f16> -> !torch.vtensor<[128,640],f16> | |
%int2_796 = torch.constant.int 2 | |
%int64_797 = torch.constant.int 64 | |
%int640_798 = torch.constant.int 640 | |
%750 = torch.prim.ListConstruct %int2_796, %int64_797, %int640_798 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%751 = torch.aten.view %749, %750 : !torch.vtensor<[128,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight : tensor<640x2048xf16> | |
%752 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_v.weight : tensor<640x2048xf16> -> !torch.vtensor<[640,2048],f16> | |
%int0_799 = torch.constant.int 0 | |
%int1_800 = torch.constant.int 1 | |
%753 = torch.aten.transpose.int %752, %int0_799, %int1_800 : !torch.vtensor<[640,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,640],f16> | |
%int128_801 = torch.constant.int 128 | |
%int2048_802 = torch.constant.int 2048 | |
%754 = torch.prim.ListConstruct %int128_801, %int2048_802 : (!torch.int, !torch.int) -> !torch.list<int> | |
%755 = torch.aten.view %arg1, %754 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%756 = torch.aten.mm %755, %753 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,640],f16> -> !torch.vtensor<[128,640],f16> | |
%int2_803 = torch.constant.int 2 | |
%int64_804 = torch.constant.int 64 | |
%int640_805 = torch.constant.int 640 | |
%757 = torch.prim.ListConstruct %int2_803, %int64_804, %int640_805 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%758 = torch.aten.view %756, %757 : !torch.vtensor<[128,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,640],f16> | |
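// Head split for cross-attention: Q -> [2,10,4096,64], K/V -> [2,10,64,64];
// attention runs over the 64 conditioning tokens.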
%int2_806 = torch.constant.int 2 | |
%int-1_807 = torch.constant.int -1 | |
%int10_808 = torch.constant.int 10 | |
%int64_809 = torch.constant.int 64 | |
%759 = torch.prim.ListConstruct %int2_806, %int-1_807, %int10_808, %int64_809 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%760 = torch.aten.view %744, %759 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_810 = torch.constant.int 1 | |
%int2_811 = torch.constant.int 2 | |
%761 = torch.aten.transpose.int %760, %int1_810, %int2_811 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%int2_812 = torch.constant.int 2 | |
%int-1_813 = torch.constant.int -1 | |
%int10_814 = torch.constant.int 10 | |
%int64_815 = torch.constant.int 64 | |
%762 = torch.prim.ListConstruct %int2_812, %int-1_813, %int10_814, %int64_815 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%763 = torch.aten.view %751, %762 : !torch.vtensor<[2,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,10,64],f16> | |
%int1_816 = torch.constant.int 1 | |
%int2_817 = torch.constant.int 2 | |
%764 = torch.aten.transpose.int %763, %int1_816, %int2_817 : !torch.vtensor<[2,64,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,64,64],f16> | |
%int2_818 = torch.constant.int 2 | |
%int-1_819 = torch.constant.int -1 | |
%int10_820 = torch.constant.int 10 | |
%int64_821 = torch.constant.int 64 | |
%765 = torch.prim.ListConstruct %int2_818, %int-1_819, %int10_820, %int64_821 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%766 = torch.aten.view %758, %765 : !torch.vtensor<[2,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,10,64],f16> | |
%int1_822 = torch.constant.int 1 | |
%int2_823 = torch.constant.int 2 | |
%767 = torch.aten.transpose.int %766, %int1_822, %int2_823 : !torch.vtensor<[2,64,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,64,64],f16> | |
%float0.000000e00_824 = torch.constant.float 0.000000e+00 | |
%false_825 = torch.constant.bool false | |
%none_826 = torch.constant.none | |
%none_827 = torch.constant.none | |
%768:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%761, %764, %767, %float0.000000e00_824, %false_825, %none_826, %none_827) : (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,64,64],f16>, !torch.vtensor<[2,10,64,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096],f32>) | |
%769 = torch.aten.detach %768#0 : !torch.vtensor<[2,10,4096,64],f16> -> !torch.vtensor<[2,10,4096,64],f16> | |
%int1_828 = torch.constant.int 1 | |
%int2_829 = torch.constant.int 2 | |
%770 = torch.aten.transpose.int %768#0, %int1_828, %int2_829 : !torch.vtensor<[2,10,4096,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,4096,10,64],f16> | |
%int2_830 = torch.constant.int 2 | |
%int-1_831 = torch.constant.int -1 | |
%int640_832 = torch.constant.int 640 | |
%771 = torch.prim.ListConstruct %int2_830, %int-1_831, %int640_832 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%772 = torch.aten.view %770, %771 : !torch.vtensor<[2,4096,10,64],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int8192_833 = torch.constant.int 8192 | |
%int640_834 = torch.constant.int 640 | |
%773 = torch.prim.ListConstruct %int8192_833, %int640_834 : (!torch.int, !torch.int) -> !torch.list<int> | |
%774 = torch.aten.view %772, %773 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight : tensor<640x640xf16> | |
%775 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_835 = torch.constant.int 0 | |
%int1_836 = torch.constant.int 1 | |
%776 = torch.aten.transpose.int %775, %int0_835, %int1_836 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias : tensor<640xf16> | |
%777 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_837 = torch.constant.int 6 | |
%778 = torch.prims.convert_element_type %777, %int6_837 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_838 = torch.constant.int 6 | |
%779 = torch.prims.convert_element_type %774, %int6_838 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_839 = torch.constant.int 6 | |
%780 = torch.prims.convert_element_type %776, %int6_839 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32> | |
%781 = torch.aten.mm %779, %780 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_840 = torch.constant.int 1 | |
%782 = torch.aten.mul.Scalar %781, %int1_840 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_841 = torch.constant.int 1 | |
%783 = torch.aten.mul.Scalar %778, %int1_841 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_842 = torch.constant.int 1 | |
%784 = torch.aten.add.Tensor %782, %783, %int1_842 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_843 = torch.constant.int 5 | |
%785 = torch.prims.convert_element_type %784, %int5_843 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_844 = torch.constant.int 2 | |
%int4096_845 = torch.constant.int 4096 | |
%int640_846 = torch.constant.int 640 | |
%786 = torch.prim.ListConstruct %int2_844, %int4096_845, %int640_846 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%787 = torch.aten.view %785, %786 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%none_847 = torch.constant.none | |
%788 = torch.aten.clone %787, %none_847 : !torch.vtensor<[2,4096,640],f16>, !torch.none -> !torch.vtensor<[2,4096,640],f16> | |
%float1.000000e00_848 = torch.constant.float 1.000000e+00 | |
%789 = torch.aten.div.Scalar %788, %float1.000000e00_848 : !torch.vtensor<[2,4096,640],f16>, !torch.float -> !torch.vtensor<[2,4096,640],f16> | |
%int1_849 = torch.constant.int 1 | |
%790 = torch.aten.add.Tensor %789, %724, %int1_849 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
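// %790 closes the cross-attention residual of transformer_blocks.1; norm3 and the
// GEGLU feed-forward below repeat the pattern of transformer_blocks.0.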
%int6_850 = torch.constant.int 6 | |
%791 = torch.prims.convert_element_type %790, %int6_850 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int2_851 = torch.constant.int 2 | |
%792 = torch.prim.ListConstruct %int2_851 : (!torch.int) -> !torch.list<int> | |
%int0_852 = torch.constant.int 0 | |
%true_853 = torch.constant.bool true | |
%result0_854, %result1_855 = torch.aten.var_mean.correction %791, %792, %int0_852, %true_853 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32> | |
%float1.000000e-05_856 = torch.constant.float 1.000000e-05 | |
%int1_857 = torch.constant.int 1 | |
%793 = torch.aten.add.Scalar %result0_854, %float1.000000e-05_856, %int1_857 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32> | |
%794 = torch.aten.rsqrt %793 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32> | |
%int1_858 = torch.constant.int 1 | |
%795 = torch.aten.sub.Tensor %790, %result1_855, %int1_858 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%796 = torch.aten.mul.Tensor %795, %794 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.weight : tensor<640xf16> | |
%797 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%798 = torch.aten.mul.Tensor %796, %797 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.bias : tensor<640xf16> | |
%799 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.norm3.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_859 = torch.constant.int 1 | |
%800 = torch.aten.add.Tensor %798, %799, %int1_859 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int5_860 = torch.constant.int 5 | |
%801 = torch.prims.convert_element_type %800, %int5_860 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int5_861 = torch.constant.int 5 | |
%802 = torch.prims.convert_element_type %result1_855, %int5_861 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int5_862 = torch.constant.int 5 | |
%803 = torch.prims.convert_element_type %794, %int5_862 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int8192_863 = torch.constant.int 8192 | |
%int640_864 = torch.constant.int 640 | |
%804 = torch.prim.ListConstruct %int8192_863, %int640_864 : (!torch.int, !torch.int) -> !torch.list<int> | |
%805 = torch.aten.view %801, %804 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight : tensor<5120x640xf16> | |
%806 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight : tensor<5120x640xf16> -> !torch.vtensor<[5120,640],f16> | |
%int0_865 = torch.constant.int 0 | |
%int1_866 = torch.constant.int 1 | |
%807 = torch.aten.transpose.int %806, %int0_865, %int1_866 : !torch.vtensor<[5120,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,5120],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias : tensor<5120xf16> | |
%808 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias : tensor<5120xf16> -> !torch.vtensor<[5120],f16> | |
%int6_867 = torch.constant.int 6 | |
%809 = torch.prims.convert_element_type %808, %int6_867 : !torch.vtensor<[5120],f16>, !torch.int -> !torch.vtensor<[5120],f32> | |
%int6_868 = torch.constant.int 6 | |
%810 = torch.prims.convert_element_type %805, %int6_868 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_869 = torch.constant.int 6 | |
%811 = torch.prims.convert_element_type %807, %int6_869 : !torch.vtensor<[640,5120],f16>, !torch.int -> !torch.vtensor<[640,5120],f32> | |
%812 = torch.aten.mm %810, %811 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,5120],f32> -> !torch.vtensor<[8192,5120],f32> | |
%int1_870 = torch.constant.int 1 | |
%813 = torch.aten.mul.Scalar %812, %int1_870 : !torch.vtensor<[8192,5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f32> | |
%int1_871 = torch.constant.int 1 | |
%814 = torch.aten.mul.Scalar %809, %int1_871 : !torch.vtensor<[5120],f32>, !torch.int -> !torch.vtensor<[5120],f32> | |
%int1_872 = torch.constant.int 1 | |
%815 = torch.aten.add.Tensor %813, %814, %int1_872 : !torch.vtensor<[8192,5120],f32>, !torch.vtensor<[5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f32> | |
%int5_873 = torch.constant.int 5 | |
%816 = torch.prims.convert_element_type %815, %int5_873 : !torch.vtensor<[8192,5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f16> | |
%int2_874 = torch.constant.int 2 | |
%int4096_875 = torch.constant.int 4096 | |
%int5120_876 = torch.constant.int 5120 | |
%817 = torch.prim.ListConstruct %int2_874, %int4096_875, %int5120_876 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%818 = torch.aten.view %816, %817 : !torch.vtensor<[8192,5120],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,5120],f16> | |
%int-1_877 = torch.constant.int -1 | |
%int0_878 = torch.constant.int 0 | |
%int2560_879 = torch.constant.int 2560 | |
%int1_880 = torch.constant.int 1 | |
%819 = torch.aten.slice.Tensor %818, %int-1_877, %int0_878, %int2560_879, %int1_880 : !torch.vtensor<[2,4096,5120],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,4096,2560],f16> | |
%int-1_881 = torch.constant.int -1 | |
%int2560_882 = torch.constant.int 2560 | |
%int5120_883 = torch.constant.int 5120 | |
%int1_884 = torch.constant.int 1 | |
%820 = torch.aten.slice.Tensor %818, %int-1_881, %int2560_882, %int5120_883, %int1_884 : !torch.vtensor<[2,4096,5120],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,4096,2560],f16> | |
%str_885 = torch.constant.str "none" | |
%821 = torch.aten.gelu %820, %str_885 : !torch.vtensor<[2,4096,2560],f16>, !torch.str -> !torch.vtensor<[2,4096,2560],f16> | |
%822 = torch.aten.mul.Tensor %819, %821 : !torch.vtensor<[2,4096,2560],f16>, !torch.vtensor<[2,4096,2560],f16> -> !torch.vtensor<[2,4096,2560],f16> | |
%none_886 = torch.constant.none | |
%823 = torch.aten.clone %822, %none_886 : !torch.vtensor<[2,4096,2560],f16>, !torch.none -> !torch.vtensor<[2,4096,2560],f16> | |
%int8192_887 = torch.constant.int 8192 | |
%int2560_888 = torch.constant.int 2560 | |
%824 = torch.prim.ListConstruct %int8192_887, %int2560_888 : (!torch.int, !torch.int) -> !torch.list<int> | |
%825 = torch.aten.view %823, %824 : !torch.vtensor<[2,4096,2560],f16>, !torch.list<int> -> !torch.vtensor<[8192,2560],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight : tensor<640x2560xf16> | |
%826 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight : tensor<640x2560xf16> -> !torch.vtensor<[640,2560],f16> | |
%int0_889 = torch.constant.int 0 | |
%int1_890 = torch.constant.int 1 | |
%827 = torch.aten.transpose.int %826, %int0_889, %int1_890 : !torch.vtensor<[640,2560],f16>, !torch.int, !torch.int -> !torch.vtensor<[2560,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias : tensor<640xf16> | |
%828 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_891 = torch.constant.int 6 | |
%829 = torch.prims.convert_element_type %828, %int6_891 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_892 = torch.constant.int 6 | |
%830 = torch.prims.convert_element_type %825, %int6_892 : !torch.vtensor<[8192,2560],f16>, !torch.int -> !torch.vtensor<[8192,2560],f32> | |
%int6_893 = torch.constant.int 6 | |
%831 = torch.prims.convert_element_type %827, %int6_893 : !torch.vtensor<[2560,640],f16>, !torch.int -> !torch.vtensor<[2560,640],f32> | |
%832 = torch.aten.mm %830, %831 : !torch.vtensor<[8192,2560],f32>, !torch.vtensor<[2560,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_894 = torch.constant.int 1 | |
%833 = torch.aten.mul.Scalar %832, %int1_894 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_895 = torch.constant.int 1 | |
%834 = torch.aten.mul.Scalar %829, %int1_895 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_896 = torch.constant.int 1 | |
%835 = torch.aten.add.Tensor %833, %834, %int1_896 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_897 = torch.constant.int 5 | |
%836 = torch.prims.convert_element_type %835, %int5_897 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_898 = torch.constant.int 2 | |
%int4096_899 = torch.constant.int 4096 | |
%int640_900 = torch.constant.int 640 | |
%837 = torch.prim.ListConstruct %int2_898, %int4096_899, %int640_900 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%838 = torch.aten.view %836, %837 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int1_901 = torch.constant.int 1 | |
%839 = torch.aten.add.Tensor %838, %790, %int1_901 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
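// %839 ends transformer_blocks.1. proj_out maps 640 -> 640, then the token sequence
// is reshaped to [2,64,64,640] and permuted back to NCHW [2,640,64,64].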
%int8192_902 = torch.constant.int 8192 | |
%int640_903 = torch.constant.int 640 | |
%840 = torch.prim.ListConstruct %int8192_902, %int640_903 : (!torch.int, !torch.int) -> !torch.list<int> | |
%841 = torch.aten.view %839, %840 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.proj_out.weight = util.global.load @_params.unet.down_blocks.1.attentions.0.proj_out.weight : tensor<640x640xf16> | |
%842 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.proj_out.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_904 = torch.constant.int 0 | |
%int1_905 = torch.constant.int 1 | |
%843 = torch.aten.transpose.int %842, %int0_904, %int1_905 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%_params.unet.down_blocks.1.attentions.0.proj_out.bias = util.global.load @_params.unet.down_blocks.1.attentions.0.proj_out.bias : tensor<640xf16> | |
%844 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.0.proj_out.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_906 = torch.constant.int 6 | |
%845 = torch.prims.convert_element_type %844, %int6_906 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_907 = torch.constant.int 6 | |
%846 = torch.prims.convert_element_type %841, %int6_907 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_908 = torch.constant.int 6 | |
%847 = torch.prims.convert_element_type %843, %int6_908 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32> | |
%848 = torch.aten.mm %846, %847 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_909 = torch.constant.int 1 | |
%849 = torch.aten.mul.Scalar %848, %int1_909 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_910 = torch.constant.int 1 | |
%850 = torch.aten.mul.Scalar %845, %int1_910 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_911 = torch.constant.int 1 | |
%851 = torch.aten.add.Tensor %849, %850, %int1_911 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_912 = torch.constant.int 5 | |
%852 = torch.prims.convert_element_type %851, %int5_912 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_913 = torch.constant.int 2 | |
%int4096_914 = torch.constant.int 4096 | |
%int640_915 = torch.constant.int 640 | |
%853 = torch.prim.ListConstruct %int2_913, %int4096_914, %int640_915 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%854 = torch.aten.view %852, %853 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int2_916 = torch.constant.int 2 | |
%int64_917 = torch.constant.int 64 | |
%int64_918 = torch.constant.int 64 | |
%int640_919 = torch.constant.int 640 | |
%855 = torch.prim.ListConstruct %int2_916, %int64_917, %int64_918, %int640_919 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%856 = torch.aten.view %854, %855 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,64,640],f16> | |
%int0_920 = torch.constant.int 0 | |
%int3_921 = torch.constant.int 3 | |
%int1_922 = torch.constant.int 1 | |
%int2_923 = torch.constant.int 2 | |
%857 = torch.prim.ListConstruct %int0_920, %int3_921, %int1_922, %int2_923 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%858 = torch.aten.permute %856, %857 : !torch.vtensor<[2,64,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,640,64,64],f16> | |
%int0_924 = torch.constant.int 0 | |
%859 = torch.aten.clone %858, %int0_924 : !torch.vtensor<[2,640,64,64],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
%int1_925 = torch.constant.int 1 | |
%860 = torch.aten.add.Tensor %859, %430, %int1_925 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[2,640,64,64],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
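// Residual add with %430, apparently the feature map captured before this attention
// block. Next: GroupNorm for down_blocks.1.resnets.1.norm1: view to [2,32,20,4096]
// (32 groups x 20 channels), var_mean over dims [2,3], normalize, then a per-channel
// affine via the unsqueezed [1,640,1,1] weight/bias; the squeezed mean/rstd
// (%892, %893) are detached side outputs.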
%int2_926 = torch.constant.int 2 | |
%int32_927 = torch.constant.int 32 | |
%int20_928 = torch.constant.int 20 | |
%int4096_929 = torch.constant.int 4096 | |
%861 = torch.prim.ListConstruct %int2_926, %int32_927, %int20_928, %int4096_929 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%862 = torch.aten.view %860, %861 : !torch.vtensor<[2,640,64,64],f16>, !torch.list<int> -> !torch.vtensor<[2,32,20,4096],f16> | |
%int6_930 = torch.constant.int 6 | |
%863 = torch.prims.convert_element_type %862, %int6_930 : !torch.vtensor<[2,32,20,4096],f16>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32> | |
%int2_931 = torch.constant.int 2 | |
%int3_932 = torch.constant.int 3 | |
%864 = torch.prim.ListConstruct %int2_931, %int3_932 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_933 = torch.constant.int 0 | |
%true_934 = torch.constant.bool true | |
%result0_935, %result1_936 = torch.aten.var_mean.correction %863, %864, %int0_933, %true_934 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float1.000000e-05_937 = torch.constant.float 1.000000e-05 | |
%int1_938 = torch.constant.int 1 | |
%865 = torch.aten.add.Scalar %result0_935, %float1.000000e-05_937, %int1_938 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%866 = torch.aten.rsqrt %865 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_939 = torch.constant.int 1 | |
%867 = torch.aten.sub.Tensor %862, %result1_936, %int1_939 : !torch.vtensor<[2,32,20,4096],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32> | |
%868 = torch.aten.mul.Tensor %867, %866 : !torch.vtensor<[2,32,20,4096],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,20,4096],f32> | |
%int2_940 = torch.constant.int 2 | |
%int640_941 = torch.constant.int 640 | |
%int64_942 = torch.constant.int 64 | |
%int64_943 = torch.constant.int 64 | |
%869 = torch.prim.ListConstruct %int2_940, %int640_941, %int64_942, %int64_943 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%870 = torch.aten.view %868, %869 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int> -> !torch.vtensor<[2,640,64,64],f32> | |
%_params.unet.down_blocks.1.resnets.1.norm1.bias = util.global.load @_params.unet.down_blocks.1.resnets.1.norm1.bias : tensor<640xf16>
%871 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.norm1.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int0_944 = torch.constant.int 0
%872 = torch.aten.unsqueeze %871, %int0_944 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16>
%int2_945 = torch.constant.int 2
%873 = torch.aten.unsqueeze %872, %int2_945 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16>
%int3_946 = torch.constant.int 3
%874 = torch.aten.unsqueeze %873, %int3_946 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16>
%_params.unet.down_blocks.1.resnets.1.norm1.weight = util.global.load @_params.unet.down_blocks.1.resnets.1.norm1.weight : tensor<640xf16>
%875 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.norm1.weight : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int0_947 = torch.constant.int 0
%876 = torch.aten.unsqueeze %875, %int0_947 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16>
%int2_948 = torch.constant.int 2
%877 = torch.aten.unsqueeze %876, %int2_948 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16>
%int3_949 = torch.constant.int 3
%878 = torch.aten.unsqueeze %877, %int3_949 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16>
%879 = torch.aten.mul.Tensor %870, %878 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16> -> !torch.vtensor<[2,640,64,64],f32>
%int1_950 = torch.constant.int 1
%880 = torch.aten.add.Tensor %879, %874, %int1_950 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f32>
%int5_951 = torch.constant.int 5
%881 = torch.prims.convert_element_type %880, %int5_951 : !torch.vtensor<[2,640,64,64],f32>, !torch.int -> !torch.vtensor<[2,640,64,64],f16>
%int5_952 = torch.constant.int 5
%882 = torch.prims.convert_element_type %result1_936, %int5_952 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16>
%int5_953 = torch.constant.int 5
%883 = torch.prims.convert_element_type %866, %int5_953 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16>
%int3_954 = torch.constant.int 3
%884 = torch.prim.ListConstruct %int3_954 : (!torch.int) -> !torch.list<int>
%885 = torch.prims.squeeze %882, %884 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16>
%int2_955 = torch.constant.int 2
%886 = torch.prim.ListConstruct %int2_955 : (!torch.int) -> !torch.list<int>
%887 = torch.prims.squeeze %885, %886 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16>
%int3_956 = torch.constant.int 3
%888 = torch.prim.ListConstruct %int3_956 : (!torch.int) -> !torch.list<int>
%889 = torch.prims.squeeze %883, %888 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16>
%int2_957 = torch.constant.int 2
%890 = torch.prim.ListConstruct %int2_957 : (!torch.int) -> !torch.list<int>
%891 = torch.prims.squeeze %889, %890 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16>
%892 = torch.aten.detach %887 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16>
%893 = torch.aten.detach %891 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16>
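// Main branch of resnets.1: SiLU followed by the 3x3 conv1 (stride 1, padding 1, dilation 1, groups 1).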
%894 = torch.aten.silu %881 : !torch.vtensor<[2,640,64,64],f16> -> !torch.vtensor<[2,640,64,64],f16>
%_params.unet.down_blocks.1.resnets.1.conv1.weight = util.global.load @_params.unet.down_blocks.1.resnets.1.conv1.weight : tensor<640x640x3x3xf16>
%895 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.conv1.weight : tensor<640x640x3x3xf16> -> !torch.vtensor<[640,640,3,3],f16>
%_params.unet.down_blocks.1.resnets.1.conv1.bias = util.global.load @_params.unet.down_blocks.1.resnets.1.conv1.bias : tensor<640xf16>
%896 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.conv1.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int1_958 = torch.constant.int 1
%int1_959 = torch.constant.int 1
%897 = torch.prim.ListConstruct %int1_958, %int1_959 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_960 = torch.constant.int 1
%int1_961 = torch.constant.int 1
%898 = torch.prim.ListConstruct %int1_960, %int1_961 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_962 = torch.constant.int 1
%int1_963 = torch.constant.int 1
%899 = torch.prim.ListConstruct %int1_962, %int1_963 : (!torch.int, !torch.int) -> !torch.list<int>
%false_964 = torch.constant.bool false
%int0_965 = torch.constant.int 0
%int0_966 = torch.constant.int 0
%900 = torch.prim.ListConstruct %int0_965, %int0_966 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_967 = torch.constant.int 1
%901 = torch.aten.convolution %894, %895, %896, %897, %898, %899, %false_964, %900, %int1_967 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[640,640,3,3],f16>, !torch.vtensor<[640],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,640,64,64],f16>
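// Time-embedding injection: SiLU on the shared [2,1280] embedding %103, a time_emb_proj linear to [2,640] computed in f32 and cast back to f16, then broadcast-added to the conv1 output as [2,640,1,1].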
%902 = torch.aten.silu %103 : !torch.vtensor<[2,1280],f16> -> !torch.vtensor<[2,1280],f16>
%_params.unet.down_blocks.1.resnets.1.time_emb_proj.weight = util.global.load @_params.unet.down_blocks.1.resnets.1.time_emb_proj.weight : tensor<640x1280xf16>
%903 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.time_emb_proj.weight : tensor<640x1280xf16> -> !torch.vtensor<[640,1280],f16>
%int0_968 = torch.constant.int 0
%int1_969 = torch.constant.int 1
%904 = torch.aten.transpose.int %903, %int0_968, %int1_969 : !torch.vtensor<[640,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,640],f16>
%_params.unet.down_blocks.1.resnets.1.time_emb_proj.bias = util.global.load @_params.unet.down_blocks.1.resnets.1.time_emb_proj.bias : tensor<640xf16>
%905 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.time_emb_proj.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int6_970 = torch.constant.int 6
%906 = torch.prims.convert_element_type %905, %int6_970 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32>
%int6_971 = torch.constant.int 6
%907 = torch.prims.convert_element_type %902, %int6_971 : !torch.vtensor<[2,1280],f16>, !torch.int -> !torch.vtensor<[2,1280],f32>
%int6_972 = torch.constant.int 6
%908 = torch.prims.convert_element_type %904, %int6_972 : !torch.vtensor<[1280,640],f16>, !torch.int -> !torch.vtensor<[1280,640],f32>
%909 = torch.aten.mm %907, %908 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280,640],f32> -> !torch.vtensor<[2,640],f32>
%int1_973 = torch.constant.int 1
%910 = torch.aten.mul.Scalar %909, %int1_973 : !torch.vtensor<[2,640],f32>, !torch.int -> !torch.vtensor<[2,640],f32>
%int1_974 = torch.constant.int 1
%911 = torch.aten.mul.Scalar %906, %int1_974 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32>
%int1_975 = torch.constant.int 1
%912 = torch.aten.add.Tensor %910, %911, %int1_975 : !torch.vtensor<[2,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[2,640],f32>
%int5_976 = torch.constant.int 5
%913 = torch.prims.convert_element_type %912, %int5_976 : !torch.vtensor<[2,640],f32>, !torch.int -> !torch.vtensor<[2,640],f16>
%int0_977 = torch.constant.int 0
%int0_978 = torch.constant.int 0
%int9223372036854775807_979 = torch.constant.int 9223372036854775807
%int1_980 = torch.constant.int 1
%914 = torch.aten.slice.Tensor %913, %int0_977, %int0_978, %int9223372036854775807_979, %int1_980 : !torch.vtensor<[2,640],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,640],f16>
%int1_981 = torch.constant.int 1
%int0_982 = torch.constant.int 0
%int9223372036854775807_983 = torch.constant.int 9223372036854775807
%int1_984 = torch.constant.int 1
%915 = torch.aten.slice.Tensor %914, %int1_981, %int0_982, %int9223372036854775807_983, %int1_984 : !torch.vtensor<[2,640],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,640],f16>
%int2_985 = torch.constant.int 2
%916 = torch.aten.unsqueeze %915, %int2_985 : !torch.vtensor<[2,640],f16>, !torch.int -> !torch.vtensor<[2,640,1],f16>
%int3_986 = torch.constant.int 3
%917 = torch.aten.unsqueeze %916, %int3_986 : !torch.vtensor<[2,640,1],f16>, !torch.int -> !torch.vtensor<[2,640,1,1],f16>
%int1_987 = torch.constant.int 1
%918 = torch.aten.add.Tensor %901, %917, %int1_987 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[2,640,1,1],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f16>
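// norm2: the same GroupNorm pattern (32 groups, eps = 1e-5, f32 statistics), followed by its per-channel affine and the squeezed/detached stats.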
%int2_988 = torch.constant.int 2
%int32_989 = torch.constant.int 32
%int20_990 = torch.constant.int 20
%int4096_991 = torch.constant.int 4096
%919 = torch.prim.ListConstruct %int2_988, %int32_989, %int20_990, %int4096_991 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%920 = torch.aten.view %918, %919 : !torch.vtensor<[2,640,64,64],f16>, !torch.list<int> -> !torch.vtensor<[2,32,20,4096],f16>
%int6_992 = torch.constant.int 6
%921 = torch.prims.convert_element_type %920, %int6_992 : !torch.vtensor<[2,32,20,4096],f16>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32>
%int2_993 = torch.constant.int 2
%int3_994 = torch.constant.int 3
%922 = torch.prim.ListConstruct %int2_993, %int3_994 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_995 = torch.constant.int 0
%true_996 = torch.constant.bool true
%result0_997, %result1_998 = torch.aten.var_mean.correction %921, %922, %int0_995, %true_996 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32>
%float1.000000e-05_999 = torch.constant.float 1.000000e-05
%int1_1000 = torch.constant.int 1
%923 = torch.aten.add.Scalar %result0_997, %float1.000000e-05_999, %int1_1000 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32>
%924 = torch.aten.rsqrt %923 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32>
%int1_1001 = torch.constant.int 1
%925 = torch.aten.sub.Tensor %920, %result1_998, %int1_1001 : !torch.vtensor<[2,32,20,4096],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32>
%926 = torch.aten.mul.Tensor %925, %924 : !torch.vtensor<[2,32,20,4096],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,20,4096],f32>
%int2_1002 = torch.constant.int 2
%int640_1003 = torch.constant.int 640
%int64_1004 = torch.constant.int 64
%int64_1005 = torch.constant.int 64
%927 = torch.prim.ListConstruct %int2_1002, %int640_1003, %int64_1004, %int64_1005 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%928 = torch.aten.view %926, %927 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int> -> !torch.vtensor<[2,640,64,64],f32>
%_params.unet.down_blocks.1.resnets.1.norm2.bias = util.global.load @_params.unet.down_blocks.1.resnets.1.norm2.bias : tensor<640xf16>
%929 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.norm2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int0_1006 = torch.constant.int 0
%930 = torch.aten.unsqueeze %929, %int0_1006 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16>
%int2_1007 = torch.constant.int 2
%931 = torch.aten.unsqueeze %930, %int2_1007 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16>
%int3_1008 = torch.constant.int 3
%932 = torch.aten.unsqueeze %931, %int3_1008 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16>
%_params.unet.down_blocks.1.resnets.1.norm2.weight = util.global.load @_params.unet.down_blocks.1.resnets.1.norm2.weight : tensor<640xf16>
%933 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.norm2.weight : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int0_1009 = torch.constant.int 0
%934 = torch.aten.unsqueeze %933, %int0_1009 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16>
%int2_1010 = torch.constant.int 2
%935 = torch.aten.unsqueeze %934, %int2_1010 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16>
%int3_1011 = torch.constant.int 3
%936 = torch.aten.unsqueeze %935, %int3_1011 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16>
%937 = torch.aten.mul.Tensor %928, %936 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16> -> !torch.vtensor<[2,640,64,64],f32>
%int1_1012 = torch.constant.int 1
%938 = torch.aten.add.Tensor %937, %932, %int1_1012 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f32>
%int5_1013 = torch.constant.int 5
%939 = torch.prims.convert_element_type %938, %int5_1013 : !torch.vtensor<[2,640,64,64],f32>, !torch.int -> !torch.vtensor<[2,640,64,64],f16>
%int5_1014 = torch.constant.int 5
%940 = torch.prims.convert_element_type %result1_998, %int5_1014 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16>
%int5_1015 = torch.constant.int 5
%941 = torch.prims.convert_element_type %924, %int5_1015 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16>
%int3_1016 = torch.constant.int 3
%942 = torch.prim.ListConstruct %int3_1016 : (!torch.int) -> !torch.list<int>
%943 = torch.prims.squeeze %940, %942 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16>
%int2_1017 = torch.constant.int 2
%944 = torch.prim.ListConstruct %int2_1017 : (!torch.int) -> !torch.list<int>
%945 = torch.prims.squeeze %943, %944 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16>
%int3_1018 = torch.constant.int 3
%946 = torch.prim.ListConstruct %int3_1018 : (!torch.int) -> !torch.list<int>
%947 = torch.prims.squeeze %941, %946 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16>
%int2_1019 = torch.constant.int 2
%948 = torch.prim.ListConstruct %int2_1019 : (!torch.int) -> !torch.list<int>
%949 = torch.prims.squeeze %947, %948 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16>
%950 = torch.aten.detach %945 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16>
%951 = torch.aten.detach %949 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16>
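// SiLU, a clone, and the 3x3 conv2; the resnet output %961 is the residual sum with the block input %860, then divided by the output scale factor 1.0.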
%952 = torch.aten.silu %939 : !torch.vtensor<[2,640,64,64],f16> -> !torch.vtensor<[2,640,64,64],f16>
%none_1020 = torch.constant.none
%953 = torch.aten.clone %952, %none_1020 : !torch.vtensor<[2,640,64,64],f16>, !torch.none -> !torch.vtensor<[2,640,64,64],f16>
%_params.unet.down_blocks.1.resnets.1.conv2.weight = util.global.load @_params.unet.down_blocks.1.resnets.1.conv2.weight : tensor<640x640x3x3xf16>
%954 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.conv2.weight : tensor<640x640x3x3xf16> -> !torch.vtensor<[640,640,3,3],f16>
%_params.unet.down_blocks.1.resnets.1.conv2.bias = util.global.load @_params.unet.down_blocks.1.resnets.1.conv2.bias : tensor<640xf16>
%955 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.resnets.1.conv2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int1_1021 = torch.constant.int 1
%int1_1022 = torch.constant.int 1
%956 = torch.prim.ListConstruct %int1_1021, %int1_1022 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1023 = torch.constant.int 1
%int1_1024 = torch.constant.int 1
%957 = torch.prim.ListConstruct %int1_1023, %int1_1024 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1025 = torch.constant.int 1
%int1_1026 = torch.constant.int 1
%958 = torch.prim.ListConstruct %int1_1025, %int1_1026 : (!torch.int, !torch.int) -> !torch.list<int>
%false_1027 = torch.constant.bool false
%int0_1028 = torch.constant.int 0
%int0_1029 = torch.constant.int 0
%959 = torch.prim.ListConstruct %int0_1028, %int0_1029 : (!torch.int, !torch.int) -> !torch.list<int>
%int1_1030 = torch.constant.int 1
%960 = torch.aten.convolution %953, %954, %955, %956, %957, %958, %false_1027, %959, %int1_1030 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[640,640,3,3],f16>, !torch.vtensor<[640],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,640,64,64],f16>
%int1_1031 = torch.constant.int 1
%961 = torch.aten.add.Tensor %860, %960, %int1_1031 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[2,640,64,64],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f16>
%float1.000000e00_1032 = torch.constant.float 1.000000e+00
%962 = torch.aten.div.Scalar %961, %float1.000000e00_1032 : !torch.vtensor<[2,640,64,64],f16>, !torch.float -> !torch.vtensor<[2,640,64,64],f16>
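// down_blocks.1.attentions.1: spatial transformer. Its input GroupNorm also uses 32 groups but with eps = 1e-6, smaller than the resnet norms.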
%int2_1033 = torch.constant.int 2
%int32_1034 = torch.constant.int 32
%int20_1035 = torch.constant.int 20
%int4096_1036 = torch.constant.int 4096
%963 = torch.prim.ListConstruct %int2_1033, %int32_1034, %int20_1035, %int4096_1036 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%964 = torch.aten.view %962, %963 : !torch.vtensor<[2,640,64,64],f16>, !torch.list<int> -> !torch.vtensor<[2,32,20,4096],f16>
%int6_1037 = torch.constant.int 6
%965 = torch.prims.convert_element_type %964, %int6_1037 : !torch.vtensor<[2,32,20,4096],f16>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32>
%int2_1038 = torch.constant.int 2
%int3_1039 = torch.constant.int 3
%966 = torch.prim.ListConstruct %int2_1038, %int3_1039 : (!torch.int, !torch.int) -> !torch.list<int>
%int0_1040 = torch.constant.int 0
%true_1041 = torch.constant.bool true
%result0_1042, %result1_1043 = torch.aten.var_mean.correction %965, %966, %int0_1040, %true_1041 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32>
%float9.999990e-07_1044 = torch.constant.float 9.9999999999999995E-7
%int1_1045 = torch.constant.int 1
%967 = torch.aten.add.Scalar %result0_1042, %float9.999990e-07_1044, %int1_1045 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32>
%968 = torch.aten.rsqrt %967 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32>
%int1_1046 = torch.constant.int 1
%969 = torch.aten.sub.Tensor %964, %result1_1043, %int1_1046 : !torch.vtensor<[2,32,20,4096],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,20,4096],f32>
%970 = torch.aten.mul.Tensor %969, %968 : !torch.vtensor<[2,32,20,4096],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,20,4096],f32>
%int2_1047 = torch.constant.int 2
%int640_1048 = torch.constant.int 640
%int64_1049 = torch.constant.int 64
%int64_1050 = torch.constant.int 64
%971 = torch.prim.ListConstruct %int2_1047, %int640_1048, %int64_1049, %int64_1050 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%972 = torch.aten.view %970, %971 : !torch.vtensor<[2,32,20,4096],f32>, !torch.list<int> -> !torch.vtensor<[2,640,64,64],f32>
%_params.unet.down_blocks.1.attentions.1.norm.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.norm.bias : tensor<640xf16>
%973 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.norm.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int0_1051 = torch.constant.int 0
%974 = torch.aten.unsqueeze %973, %int0_1051 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16>
%int2_1052 = torch.constant.int 2
%975 = torch.aten.unsqueeze %974, %int2_1052 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16>
%int3_1053 = torch.constant.int 3
%976 = torch.aten.unsqueeze %975, %int3_1053 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16>
%_params.unet.down_blocks.1.attentions.1.norm.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.norm.weight : tensor<640xf16>
%977 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.norm.weight : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int0_1054 = torch.constant.int 0
%978 = torch.aten.unsqueeze %977, %int0_1054 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16>
%int2_1055 = torch.constant.int 2
%979 = torch.aten.unsqueeze %978, %int2_1055 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16>
%int3_1056 = torch.constant.int 3
%980 = torch.aten.unsqueeze %979, %int3_1056 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16>
%981 = torch.aten.mul.Tensor %972, %980 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16> -> !torch.vtensor<[2,640,64,64],f32>
%int1_1057 = torch.constant.int 1
%982 = torch.aten.add.Tensor %981, %976, %int1_1057 : !torch.vtensor<[2,640,64,64],f32>, !torch.vtensor<[1,640,1,1],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f32>
%int5_1058 = torch.constant.int 5
%983 = torch.prims.convert_element_type %982, %int5_1058 : !torch.vtensor<[2,640,64,64],f32>, !torch.int -> !torch.vtensor<[2,640,64,64],f16>
%int5_1059 = torch.constant.int 5
%984 = torch.prims.convert_element_type %result1_1043, %int5_1059 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16>
%int5_1060 = torch.constant.int 5
%985 = torch.prims.convert_element_type %968, %int5_1060 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16>
%int3_1061 = torch.constant.int 3
%986 = torch.prim.ListConstruct %int3_1061 : (!torch.int) -> !torch.list<int>
%987 = torch.prims.squeeze %984, %986 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16>
%int2_1062 = torch.constant.int 2
%988 = torch.prim.ListConstruct %int2_1062 : (!torch.int) -> !torch.list<int>
%989 = torch.prims.squeeze %987, %988 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16>
%int3_1063 = torch.constant.int 3
%990 = torch.prim.ListConstruct %int3_1063 : (!torch.int) -> !torch.list<int>
%991 = torch.prims.squeeze %985, %990 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16>
%int2_1064 = torch.constant.int 2
%992 = torch.prim.ListConstruct %int2_1064 : (!torch.int) -> !torch.list<int>
%993 = torch.prims.squeeze %991, %992 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16>
%994 = torch.aten.detach %989 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16>
%995 = torch.aten.detach %993 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16>
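// Flatten to a token sequence: permute NCHW -> NHWC, view as [2,4096,640], then the proj_in linear (an [8192,640] x [640,640] matmul plus bias).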
%int0_1065 = torch.constant.int 0
%int2_1066 = torch.constant.int 2
%int3_1067 = torch.constant.int 3
%int1_1068 = torch.constant.int 1
%996 = torch.prim.ListConstruct %int0_1065, %int2_1066, %int3_1067, %int1_1068 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%997 = torch.aten.permute %983, %996 : !torch.vtensor<[2,640,64,64],f16>, !torch.list<int> -> !torch.vtensor<[2,64,64,640],f16>
%int2_1069 = torch.constant.int 2
%int4096_1070 = torch.constant.int 4096
%int640_1071 = torch.constant.int 640
%998 = torch.prim.ListConstruct %int2_1069, %int4096_1070, %int640_1071 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%999 = torch.aten.view %997, %998 : !torch.vtensor<[2,64,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
%_params.unet.down_blocks.1.attentions.1.proj_in.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.proj_in.weight : tensor<640x640xf16>
%1000 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.proj_in.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16>
%int0_1072 = torch.constant.int 0
%int1_1073 = torch.constant.int 1
%1001 = torch.aten.transpose.int %1000, %int0_1072, %int1_1073 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16>
%int0_1074 = torch.constant.int 0
%1002 = torch.aten.clone %999, %int0_1074 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16>
%int8192_1075 = torch.constant.int 8192
%int640_1076 = torch.constant.int 640
%1003 = torch.prim.ListConstruct %int8192_1075, %int640_1076 : (!torch.int, !torch.int) -> !torch.list<int>
%1004 = torch.aten._unsafe_view %1002, %1003 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16>
%1005 = torch.aten.mm %1004, %1001 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16>
%int2_1077 = torch.constant.int 2
%int4096_1078 = torch.constant.int 4096
%int640_1079 = torch.constant.int 640
%1006 = torch.prim.ListConstruct %int2_1077, %int4096_1078, %int640_1079 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1007 = torch.aten.view %1005, %1006 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
%_params.unet.down_blocks.1.attentions.1.proj_in.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.proj_in.bias : tensor<640xf16>
%1008 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.proj_in.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int1_1080 = torch.constant.int 1
%1009 = torch.aten.add.Tensor %1007, %1008, %int1_1080 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16>
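// transformer_blocks.0, norm1: LayerNorm over the 640-channel axis in f32 (eps = 1e-5) with elementwise affine, then cast back to f16.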
%int6_1081 = torch.constant.int 6
%1010 = torch.prims.convert_element_type %1009, %int6_1081 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32>
%int2_1082 = torch.constant.int 2
%1011 = torch.prim.ListConstruct %int2_1082 : (!torch.int) -> !torch.list<int>
%int0_1083 = torch.constant.int 0
%true_1084 = torch.constant.bool true
%result0_1085, %result1_1086 = torch.aten.var_mean.correction %1010, %1011, %int0_1083, %true_1084 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32>
%float1.000000e-05_1087 = torch.constant.float 1.000000e-05
%int1_1088 = torch.constant.int 1
%1012 = torch.aten.add.Scalar %result0_1085, %float1.000000e-05_1087, %int1_1088 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32>
%1013 = torch.aten.rsqrt %1012 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32>
%int1_1089 = torch.constant.int 1
%1014 = torch.aten.sub.Tensor %1009, %result1_1086, %int1_1089 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32>
%1015 = torch.aten.mul.Tensor %1014, %1013 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.weight : tensor<640xf16>
%1016 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.weight : tensor<640xf16> -> !torch.vtensor<[640],f16>
%1017 = torch.aten.mul.Tensor %1015, %1016 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.bias : tensor<640xf16>
%1018 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm1.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int1_1090 = torch.constant.int 1
%1019 = torch.aten.add.Tensor %1017, %1018, %int1_1090 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32>
%int5_1091 = torch.constant.int 5
%1020 = torch.prims.convert_element_type %1019, %int5_1091 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16>
%int5_1092 = torch.constant.int 5
%1021 = torch.prims.convert_element_type %result1_1086, %int5_1092 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16>
%int5_1093 = torch.constant.int 5
%1022 = torch.prims.convert_element_type %1013, %int5_1093 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16>
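// attn1 (self-attention): q, k, and v are each produced by a bias-free [640,640] projection of the same normed hidden states.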
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight : tensor<640x640xf16>
%1023 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_q.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16>
%int0_1094 = torch.constant.int 0
%int1_1095 = torch.constant.int 1
%1024 = torch.aten.transpose.int %1023, %int0_1094, %int1_1095 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16>
%int8192_1096 = torch.constant.int 8192
%int640_1097 = torch.constant.int 640
%1025 = torch.prim.ListConstruct %int8192_1096, %int640_1097 : (!torch.int, !torch.int) -> !torch.list<int>
%1026 = torch.aten.view %1020, %1025 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16>
%1027 = torch.aten.mm %1026, %1024 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16>
%int2_1098 = torch.constant.int 2
%int4096_1099 = torch.constant.int 4096
%int640_1100 = torch.constant.int 640
%1028 = torch.prim.ListConstruct %int2_1098, %int4096_1099, %int640_1100 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1029 = torch.aten.view %1027, %1028 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight : tensor<640x640xf16>
%1030 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_k.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16>
%int0_1101 = torch.constant.int 0
%int1_1102 = torch.constant.int 1
%1031 = torch.aten.transpose.int %1030, %int0_1101, %int1_1102 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16>
%int8192_1103 = torch.constant.int 8192
%int640_1104 = torch.constant.int 640
%1032 = torch.prim.ListConstruct %int8192_1103, %int640_1104 : (!torch.int, !torch.int) -> !torch.list<int>
%1033 = torch.aten.view %1020, %1032 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16>
%1034 = torch.aten.mm %1033, %1031 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16>
%int2_1105 = torch.constant.int 2
%int4096_1106 = torch.constant.int 4096
%int640_1107 = torch.constant.int 640
%1035 = torch.prim.ListConstruct %int2_1105, %int4096_1106, %int640_1107 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1036 = torch.aten.view %1034, %1035 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight : tensor<640x640xf16>
%1037 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_v.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16>
%int0_1108 = torch.constant.int 0
%int1_1109 = torch.constant.int 1
%1038 = torch.aten.transpose.int %1037, %int0_1108, %int1_1109 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16>
%int8192_1110 = torch.constant.int 8192
%int640_1111 = torch.constant.int 640
%1039 = torch.prim.ListConstruct %int8192_1110, %int640_1111 : (!torch.int, !torch.int) -> !torch.list<int>
%1040 = torch.aten.view %1020, %1039 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16>
%1041 = torch.aten.mm %1040, %1038 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16>
%int2_1112 = torch.constant.int 2
%int4096_1113 = torch.constant.int 4096
%int640_1114 = torch.constant.int 640
%1042 = torch.prim.ListConstruct %int2_1112, %int4096_1113, %int640_1114 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1043 = torch.aten.view %1041, %1042 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
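// Multi-head split: [2,4096,640] -> [2,4096,10,64] -> [2,10,4096,64] (10 heads of size 64), then scaled dot-product attention via the CPU flash-attention op (dropout 0.0, non-causal, no mask).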
%int2_1115 = torch.constant.int 2
%int-1_1116 = torch.constant.int -1
%int10_1117 = torch.constant.int 10
%int64_1118 = torch.constant.int 64
%1044 = torch.prim.ListConstruct %int2_1115, %int-1_1116, %int10_1117, %int64_1118 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1045 = torch.aten.view %1029, %1044 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16>
%int1_1119 = torch.constant.int 1
%int2_1120 = torch.constant.int 2
%1046 = torch.aten.transpose.int %1045, %int1_1119, %int2_1120 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16>
%int2_1121 = torch.constant.int 2
%int-1_1122 = torch.constant.int -1
%int10_1123 = torch.constant.int 10
%int64_1124 = torch.constant.int 64
%1047 = torch.prim.ListConstruct %int2_1121, %int-1_1122, %int10_1123, %int64_1124 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1048 = torch.aten.view %1036, %1047 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16>
%int1_1125 = torch.constant.int 1
%int2_1126 = torch.constant.int 2
%1049 = torch.aten.transpose.int %1048, %int1_1125, %int2_1126 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16>
%int2_1127 = torch.constant.int 2
%int-1_1128 = torch.constant.int -1
%int10_1129 = torch.constant.int 10
%int64_1130 = torch.constant.int 64
%1050 = torch.prim.ListConstruct %int2_1127, %int-1_1128, %int10_1129, %int64_1130 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1051 = torch.aten.view %1043, %1050 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16>
%int1_1131 = torch.constant.int 1
%int2_1132 = torch.constant.int 2
%1052 = torch.aten.transpose.int %1051, %int1_1131, %int2_1132 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16>
%float0.000000e00_1133 = torch.constant.float 0.000000e+00
%false_1134 = torch.constant.bool false
%none_1135 = torch.constant.none
%none_1136 = torch.constant.none
%1053:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%1046, %1049, %1052, %float0.000000e00_1133, %false_1134, %none_1135, %none_1136) : (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096],f32>)
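// attn1 output: merge heads back to [2,4096,640], apply to_out.0 in f32 and cast back, divide by the rescale factor 1.0, and add the pre-attention residual %1009.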
%1054 = torch.aten.detach %1053#0 : !torch.vtensor<[2,10,4096,64],f16> -> !torch.vtensor<[2,10,4096,64],f16>
%int1_1137 = torch.constant.int 1
%int2_1138 = torch.constant.int 2
%1055 = torch.aten.transpose.int %1053#0, %int1_1137, %int2_1138 : !torch.vtensor<[2,10,4096,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,4096,10,64],f16>
%int2_1139 = torch.constant.int 2
%int-1_1140 = torch.constant.int -1
%int640_1141 = torch.constant.int 640
%1056 = torch.prim.ListConstruct %int2_1139, %int-1_1140, %int640_1141 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1057 = torch.aten.view %1055, %1056 : !torch.vtensor<[2,4096,10,64],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
%int8192_1142 = torch.constant.int 8192
%int640_1143 = torch.constant.int 640
%1058 = torch.prim.ListConstruct %int8192_1142, %int640_1143 : (!torch.int, !torch.int) -> !torch.list<int>
%1059 = torch.aten.view %1057, %1058 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight : tensor<640x640xf16>
%1060 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16>
%int0_1144 = torch.constant.int 0
%int1_1145 = torch.constant.int 1
%1061 = torch.aten.transpose.int %1060, %int0_1144, %int1_1145 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias : tensor<640xf16>
%1062 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int6_1146 = torch.constant.int 6
%1063 = torch.prims.convert_element_type %1062, %int6_1146 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32>
%int6_1147 = torch.constant.int 6
%1064 = torch.prims.convert_element_type %1059, %int6_1147 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32>
%int6_1148 = torch.constant.int 6
%1065 = torch.prims.convert_element_type %1061, %int6_1148 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32>
%1066 = torch.aten.mm %1064, %1065 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32>
%int1_1149 = torch.constant.int 1
%1067 = torch.aten.mul.Scalar %1066, %int1_1149 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32>
%int1_1150 = torch.constant.int 1
%1068 = torch.aten.mul.Scalar %1063, %int1_1150 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32>
%int1_1151 = torch.constant.int 1
%1069 = torch.aten.add.Tensor %1067, %1068, %int1_1151 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32>
%int5_1152 = torch.constant.int 5
%1070 = torch.prims.convert_element_type %1069, %int5_1152 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16>
%int2_1153 = torch.constant.int 2
%int4096_1154 = torch.constant.int 4096
%int640_1155 = torch.constant.int 640
%1071 = torch.prim.ListConstruct %int2_1153, %int4096_1154, %int640_1155 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1072 = torch.aten.view %1070, %1071 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
%none_1156 = torch.constant.none
%1073 = torch.aten.clone %1072, %none_1156 : !torch.vtensor<[2,4096,640],f16>, !torch.none -> !torch.vtensor<[2,4096,640],f16>
%float1.000000e00_1157 = torch.constant.float 1.000000e+00
%1074 = torch.aten.div.Scalar %1073, %float1.000000e00_1157 : !torch.vtensor<[2,4096,640],f16>, !torch.float -> !torch.vtensor<[2,4096,640],f16>
%int1_1158 = torch.constant.int 1
%1075 = torch.aten.add.Tensor %1074, %1009, %int1_1158 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16>
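// norm2: LayerNorm (eps = 1e-5) ahead of the cross-attention.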
%int6_1159 = torch.constant.int 6
%1076 = torch.prims.convert_element_type %1075, %int6_1159 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32>
%int2_1160 = torch.constant.int 2
%1077 = torch.prim.ListConstruct %int2_1160 : (!torch.int) -> !torch.list<int>
%int0_1161 = torch.constant.int 0
%true_1162 = torch.constant.bool true
%result0_1163, %result1_1164 = torch.aten.var_mean.correction %1076, %1077, %int0_1161, %true_1162 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32>
%float1.000000e-05_1165 = torch.constant.float 1.000000e-05
%int1_1166 = torch.constant.int 1
%1078 = torch.aten.add.Scalar %result0_1163, %float1.000000e-05_1165, %int1_1166 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32>
%1079 = torch.aten.rsqrt %1078 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32>
%int1_1167 = torch.constant.int 1
%1080 = torch.aten.sub.Tensor %1075, %result1_1164, %int1_1167 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32>
%1081 = torch.aten.mul.Tensor %1080, %1079 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.weight : tensor<640xf16>
%1082 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.weight : tensor<640xf16> -> !torch.vtensor<[640],f16>
%1083 = torch.aten.mul.Tensor %1081, %1082 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.bias : tensor<640xf16>
%1084 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int1_1168 = torch.constant.int 1
%1085 = torch.aten.add.Tensor %1083, %1084, %int1_1168 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32>
%int5_1169 = torch.constant.int 5
%1086 = torch.prims.convert_element_type %1085, %int5_1169 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16>
%int5_1170 = torch.constant.int 5
%1087 = torch.prims.convert_element_type %result1_1164, %int5_1170 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16>
%int5_1171 = torch.constant.int 5
%1088 = torch.prims.convert_element_type %1079, %int5_1171 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16>
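// attn2 (cross-attention): q is projected from the hidden states, while k and v are projected from %arg1 ([2,64,2048]), presumably the encoder/context sequence, down to 640 channels.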
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight : tensor<640x640xf16>
%1089 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16>
%int0_1172 = torch.constant.int 0
%int1_1173 = torch.constant.int 1
%1090 = torch.aten.transpose.int %1089, %int0_1172, %int1_1173 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16>
%int8192_1174 = torch.constant.int 8192
%int640_1175 = torch.constant.int 640
%1091 = torch.prim.ListConstruct %int8192_1174, %int640_1175 : (!torch.int, !torch.int) -> !torch.list<int>
%1092 = torch.aten.view %1086, %1091 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16>
%1093 = torch.aten.mm %1092, %1090 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16>
%int2_1176 = torch.constant.int 2
%int4096_1177 = torch.constant.int 4096
%int640_1178 = torch.constant.int 640
%1094 = torch.prim.ListConstruct %int2_1176, %int4096_1177, %int640_1178 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1095 = torch.aten.view %1093, %1094 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight : tensor<640x2048xf16>
%1096 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_k.weight : tensor<640x2048xf16> -> !torch.vtensor<[640,2048],f16>
%int0_1179 = torch.constant.int 0
%int1_1180 = torch.constant.int 1
%1097 = torch.aten.transpose.int %1096, %int0_1179, %int1_1180 : !torch.vtensor<[640,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,640],f16>
%int128_1181 = torch.constant.int 128
%int2048_1182 = torch.constant.int 2048
%1098 = torch.prim.ListConstruct %int128_1181, %int2048_1182 : (!torch.int, !torch.int) -> !torch.list<int>
%1099 = torch.aten.view %arg1, %1098 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16>
%1100 = torch.aten.mm %1099, %1097 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,640],f16> -> !torch.vtensor<[128,640],f16>
%int2_1183 = torch.constant.int 2
%int64_1184 = torch.constant.int 64
%int640_1185 = torch.constant.int 640
%1101 = torch.prim.ListConstruct %int2_1183, %int64_1184, %int640_1185 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1102 = torch.aten.view %1100, %1101 : !torch.vtensor<[128,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,640],f16>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight : tensor<640x2048xf16>
%1103 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_v.weight : tensor<640x2048xf16> -> !torch.vtensor<[640,2048],f16>
%int0_1186 = torch.constant.int 0
%int1_1187 = torch.constant.int 1
%1104 = torch.aten.transpose.int %1103, %int0_1186, %int1_1187 : !torch.vtensor<[640,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,640],f16>
%int128_1188 = torch.constant.int 128
%int2048_1189 = torch.constant.int 2048
%1105 = torch.prim.ListConstruct %int128_1188, %int2048_1189 : (!torch.int, !torch.int) -> !torch.list<int>
%1106 = torch.aten.view %arg1, %1105 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16>
%1107 = torch.aten.mm %1106, %1104 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,640],f16> -> !torch.vtensor<[128,640],f16>
%int2_1190 = torch.constant.int 2
%int64_1191 = torch.constant.int 64
%int640_1192 = torch.constant.int 640
%1108 = torch.prim.ListConstruct %int2_1190, %int64_1191, %int640_1192 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1109 = torch.aten.view %1107, %1108 : !torch.vtensor<[128,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,640],f16>
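// Cross-attention SDPA: 10 heads, query length 4096 against key/value length 64.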
%int2_1193 = torch.constant.int 2
%int-1_1194 = torch.constant.int -1
%int10_1195 = torch.constant.int 10
%int64_1196 = torch.constant.int 64
%1110 = torch.prim.ListConstruct %int2_1193, %int-1_1194, %int10_1195, %int64_1196 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1111 = torch.aten.view %1095, %1110 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16>
%int1_1197 = torch.constant.int 1
%int2_1198 = torch.constant.int 2
%1112 = torch.aten.transpose.int %1111, %int1_1197, %int2_1198 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16>
%int2_1199 = torch.constant.int 2
%int-1_1200 = torch.constant.int -1
%int10_1201 = torch.constant.int 10
%int64_1202 = torch.constant.int 64
%1113 = torch.prim.ListConstruct %int2_1199, %int-1_1200, %int10_1201, %int64_1202 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1114 = torch.aten.view %1102, %1113 : !torch.vtensor<[2,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,10,64],f16>
%int1_1203 = torch.constant.int 1
%int2_1204 = torch.constant.int 2
%1115 = torch.aten.transpose.int %1114, %int1_1203, %int2_1204 : !torch.vtensor<[2,64,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,64,64],f16>
%int2_1205 = torch.constant.int 2
%int-1_1206 = torch.constant.int -1
%int10_1207 = torch.constant.int 10
%int64_1208 = torch.constant.int 64
%1116 = torch.prim.ListConstruct %int2_1205, %int-1_1206, %int10_1207, %int64_1208 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1117 = torch.aten.view %1109, %1116 : !torch.vtensor<[2,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,10,64],f16>
%int1_1209 = torch.constant.int 1
%int2_1210 = torch.constant.int 2
%1118 = torch.aten.transpose.int %1117, %int1_1209, %int2_1210 : !torch.vtensor<[2,64,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,64,64],f16>
%float0.000000e00_1211 = torch.constant.float 0.000000e+00
%false_1212 = torch.constant.bool false
%none_1213 = torch.constant.none
%none_1214 = torch.constant.none
%1119:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%1112, %1115, %1118, %float0.000000e00_1211, %false_1212, %none_1213, %none_1214) : (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,64,64],f16>, !torch.vtensor<[2,10,64,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096],f32>)
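// attn2 output projection (to_out.0 in f32, cast back to f16) and residual add onto %1075.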
%1120 = torch.aten.detach %1119#0 : !torch.vtensor<[2,10,4096,64],f16> -> !torch.vtensor<[2,10,4096,64],f16>
%int1_1215 = torch.constant.int 1
%int2_1216 = torch.constant.int 2
%1121 = torch.aten.transpose.int %1119#0, %int1_1215, %int2_1216 : !torch.vtensor<[2,10,4096,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,4096,10,64],f16>
%int2_1217 = torch.constant.int 2
%int-1_1218 = torch.constant.int -1
%int640_1219 = torch.constant.int 640
%1122 = torch.prim.ListConstruct %int2_1217, %int-1_1218, %int640_1219 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1123 = torch.aten.view %1121, %1122 : !torch.vtensor<[2,4096,10,64],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
%int8192_1220 = torch.constant.int 8192
%int640_1221 = torch.constant.int 640
%1124 = torch.prim.ListConstruct %int8192_1220, %int640_1221 : (!torch.int, !torch.int) -> !torch.list<int>
%1125 = torch.aten.view %1123, %1124 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight : tensor<640x640xf16>
%1126 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16>
%int0_1222 = torch.constant.int 0
%int1_1223 = torch.constant.int 1
%1127 = torch.aten.transpose.int %1126, %int0_1222, %int1_1223 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias : tensor<640xf16>
%1128 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int6_1224 = torch.constant.int 6
%1129 = torch.prims.convert_element_type %1128, %int6_1224 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32>
%int6_1225 = torch.constant.int 6
%1130 = torch.prims.convert_element_type %1125, %int6_1225 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32>
%int6_1226 = torch.constant.int 6
%1131 = torch.prims.convert_element_type %1127, %int6_1226 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32>
%1132 = torch.aten.mm %1130, %1131 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32>
%int1_1227 = torch.constant.int 1
%1133 = torch.aten.mul.Scalar %1132, %int1_1227 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32>
%int1_1228 = torch.constant.int 1
%1134 = torch.aten.mul.Scalar %1129, %int1_1228 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32>
%int1_1229 = torch.constant.int 1
%1135 = torch.aten.add.Tensor %1133, %1134, %int1_1229 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32>
%int5_1230 = torch.constant.int 5
%1136 = torch.prims.convert_element_type %1135, %int5_1230 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16>
%int2_1231 = torch.constant.int 2
%int4096_1232 = torch.constant.int 4096
%int640_1233 = torch.constant.int 640
%1137 = torch.prim.ListConstruct %int2_1231, %int4096_1232, %int640_1233 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1138 = torch.aten.view %1136, %1137 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16>
%none_1234 = torch.constant.none
%1139 = torch.aten.clone %1138, %none_1234 : !torch.vtensor<[2,4096,640],f16>, !torch.none -> !torch.vtensor<[2,4096,640],f16>
%float1.000000e00_1235 = torch.constant.float 1.000000e+00
%1140 = torch.aten.div.Scalar %1139, %float1.000000e00_1235 : !torch.vtensor<[2,4096,640],f16>, !torch.float -> !torch.vtensor<[2,4096,640],f16>
%int1_1236 = torch.constant.int 1
%1141 = torch.aten.add.Tensor %1140, %1075, %int1_1236 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16>
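// norm3: LayerNorm (eps = 1e-5) before the feed-forward.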
%int6_1237 = torch.constant.int 6
%1142 = torch.prims.convert_element_type %1141, %int6_1237 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32>
%int2_1238 = torch.constant.int 2
%1143 = torch.prim.ListConstruct %int2_1238 : (!torch.int) -> !torch.list<int>
%int0_1239 = torch.constant.int 0
%true_1240 = torch.constant.bool true
%result0_1241, %result1_1242 = torch.aten.var_mean.correction %1142, %1143, %int0_1239, %true_1240 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32>
%float1.000000e-05_1243 = torch.constant.float 1.000000e-05
%int1_1244 = torch.constant.int 1
%1144 = torch.aten.add.Scalar %result0_1241, %float1.000000e-05_1243, %int1_1244 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32>
%1145 = torch.aten.rsqrt %1144 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32>
%int1_1245 = torch.constant.int 1
%1146 = torch.aten.sub.Tensor %1141, %result1_1242, %int1_1245 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32>
%1147 = torch.aten.mul.Tensor %1146, %1145 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.weight : tensor<640xf16>
%1148 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.weight : tensor<640xf16> -> !torch.vtensor<[640],f16>
%1149 = torch.aten.mul.Tensor %1147, %1148 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32>
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.bias : tensor<640xf16>
%1150 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.norm3.bias : tensor<640xf16> -> !torch.vtensor<[640],f16>
%int1_1246 = torch.constant.int 1
%1151 = torch.aten.add.Tensor %1149, %1150, %int1_1246 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32>
%int5_1247 = torch.constant.int 5
%1152 = torch.prims.convert_element_type %1151, %int5_1247 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16>
%int5_1248 = torch.constant.int 5
%1153 = torch.prims.convert_element_type %result1_1242, %int5_1248 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16>
%int5_1249 = torch.constant.int 5
%1154 = torch.prims.convert_element_type %1145, %int5_1249 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16>
%int8192_1250 = torch.constant.int 8192 | |
%int640_1251 = torch.constant.int 640 | |
%1155 = torch.prim.ListConstruct %int8192_1250, %int640_1251 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1156 = torch.aten.view %1152, %1155 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
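// Feed-forward (GEGLU) of transformer_blocks.0: project 640 -> 5120, split into two 2560-wide halves, multiply the first half by GELU of the second, then project 2560 -> 640.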
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight : tensor<5120x640xf16> | |
%1157 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight : tensor<5120x640xf16> -> !torch.vtensor<[5120,640],f16> | |
%int0_1252 = torch.constant.int 0 | |
%int1_1253 = torch.constant.int 1 | |
%1158 = torch.aten.transpose.int %1157, %int0_1252, %int1_1253 : !torch.vtensor<[5120,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,5120],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias : tensor<5120xf16> | |
%1159 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias : tensor<5120xf16> -> !torch.vtensor<[5120],f16> | |
%int6_1254 = torch.constant.int 6 | |
%1160 = torch.prims.convert_element_type %1159, %int6_1254 : !torch.vtensor<[5120],f16>, !torch.int -> !torch.vtensor<[5120],f32> | |
%int6_1255 = torch.constant.int 6 | |
%1161 = torch.prims.convert_element_type %1156, %int6_1255 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_1256 = torch.constant.int 6 | |
%1162 = torch.prims.convert_element_type %1158, %int6_1256 : !torch.vtensor<[640,5120],f16>, !torch.int -> !torch.vtensor<[640,5120],f32> | |
%1163 = torch.aten.mm %1161, %1162 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,5120],f32> -> !torch.vtensor<[8192,5120],f32> | |
%int1_1257 = torch.constant.int 1 | |
%1164 = torch.aten.mul.Scalar %1163, %int1_1257 : !torch.vtensor<[8192,5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f32> | |
%int1_1258 = torch.constant.int 1 | |
%1165 = torch.aten.mul.Scalar %1160, %int1_1258 : !torch.vtensor<[5120],f32>, !torch.int -> !torch.vtensor<[5120],f32> | |
%int1_1259 = torch.constant.int 1 | |
%1166 = torch.aten.add.Tensor %1164, %1165, %int1_1259 : !torch.vtensor<[8192,5120],f32>, !torch.vtensor<[5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f32> | |
%int5_1260 = torch.constant.int 5 | |
%1167 = torch.prims.convert_element_type %1166, %int5_1260 : !torch.vtensor<[8192,5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f16> | |
%int2_1261 = torch.constant.int 2 | |
%int4096_1262 = torch.constant.int 4096 | |
%int5120_1263 = torch.constant.int 5120 | |
%1168 = torch.prim.ListConstruct %int2_1261, %int4096_1262, %int5120_1263 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1169 = torch.aten.view %1167, %1168 : !torch.vtensor<[8192,5120],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,5120],f16> | |
%int-1_1264 = torch.constant.int -1 | |
%int0_1265 = torch.constant.int 0 | |
%int2560_1266 = torch.constant.int 2560 | |
%int1_1267 = torch.constant.int 1 | |
%1170 = torch.aten.slice.Tensor %1169, %int-1_1264, %int0_1265, %int2560_1266, %int1_1267 : !torch.vtensor<[2,4096,5120],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,4096,2560],f16> | |
%int-1_1268 = torch.constant.int -1 | |
%int2560_1269 = torch.constant.int 2560 | |
%int5120_1270 = torch.constant.int 5120 | |
%int1_1271 = torch.constant.int 1 | |
%1171 = torch.aten.slice.Tensor %1169, %int-1_1268, %int2560_1269, %int5120_1270, %int1_1271 : !torch.vtensor<[2,4096,5120],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,4096,2560],f16> | |
%str_1272 = torch.constant.str "none" | |
%1172 = torch.aten.gelu %1171, %str_1272 : !torch.vtensor<[2,4096,2560],f16>, !torch.str -> !torch.vtensor<[2,4096,2560],f16> | |
%1173 = torch.aten.mul.Tensor %1170, %1172 : !torch.vtensor<[2,4096,2560],f16>, !torch.vtensor<[2,4096,2560],f16> -> !torch.vtensor<[2,4096,2560],f16> | |
%none_1273 = torch.constant.none | |
%1174 = torch.aten.clone %1173, %none_1273 : !torch.vtensor<[2,4096,2560],f16>, !torch.none -> !torch.vtensor<[2,4096,2560],f16> | |
%int8192_1274 = torch.constant.int 8192 | |
%int2560_1275 = torch.constant.int 2560 | |
%1175 = torch.prim.ListConstruct %int8192_1274, %int2560_1275 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1176 = torch.aten.view %1174, %1175 : !torch.vtensor<[2,4096,2560],f16>, !torch.list<int> -> !torch.vtensor<[8192,2560],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight : tensor<640x2560xf16> | |
%1177 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight : tensor<640x2560xf16> -> !torch.vtensor<[640,2560],f16> | |
%int0_1276 = torch.constant.int 0 | |
%int1_1277 = torch.constant.int 1 | |
%1178 = torch.aten.transpose.int %1177, %int0_1276, %int1_1277 : !torch.vtensor<[640,2560],f16>, !torch.int, !torch.int -> !torch.vtensor<[2560,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias : tensor<640xf16> | |
%1179 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_1278 = torch.constant.int 6 | |
%1180 = torch.prims.convert_element_type %1179, %int6_1278 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_1279 = torch.constant.int 6 | |
%1181 = torch.prims.convert_element_type %1176, %int6_1279 : !torch.vtensor<[8192,2560],f16>, !torch.int -> !torch.vtensor<[8192,2560],f32> | |
%int6_1280 = torch.constant.int 6 | |
%1182 = torch.prims.convert_element_type %1178, %int6_1280 : !torch.vtensor<[2560,640],f16>, !torch.int -> !torch.vtensor<[2560,640],f32> | |
%1183 = torch.aten.mm %1181, %1182 : !torch.vtensor<[8192,2560],f32>, !torch.vtensor<[2560,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_1281 = torch.constant.int 1 | |
%1184 = torch.aten.mul.Scalar %1183, %int1_1281 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_1282 = torch.constant.int 1 | |
%1185 = torch.aten.mul.Scalar %1180, %int1_1282 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_1283 = torch.constant.int 1 | |
%1186 = torch.aten.add.Tensor %1184, %1185, %int1_1283 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_1284 = torch.constant.int 5 | |
%1187 = torch.prims.convert_element_type %1186, %int5_1284 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_1285 = torch.constant.int 2 | |
%int4096_1286 = torch.constant.int 4096 | |
%int640_1287 = torch.constant.int 640 | |
%1188 = torch.prim.ListConstruct %int2_1285, %int4096_1286, %int640_1287 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1189 = torch.aten.view %1187, %1188 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int1_1288 = torch.constant.int 1 | |
%1190 = torch.aten.add.Tensor %1189, %1141, %int1_1288 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
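// Residual add closes transformer_blocks.0; transformer_blocks.1 starts below with LayerNorm (norm1) of the result.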
%int6_1289 = torch.constant.int 6 | |
%1191 = torch.prims.convert_element_type %1190, %int6_1289 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int2_1290 = torch.constant.int 2 | |
%1192 = torch.prim.ListConstruct %int2_1290 : (!torch.int) -> !torch.list<int> | |
%int0_1291 = torch.constant.int 0 | |
%true_1292 = torch.constant.bool true | |
%result0_1293, %result1_1294 = torch.aten.var_mean.correction %1191, %1192, %int0_1291, %true_1292 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32> | |
%float1.000000e-05_1295 = torch.constant.float 1.000000e-05 | |
%int1_1296 = torch.constant.int 1 | |
%1193 = torch.aten.add.Scalar %result0_1293, %float1.000000e-05_1295, %int1_1296 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32> | |
%1194 = torch.aten.rsqrt %1193 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32> | |
%int1_1297 = torch.constant.int 1 | |
%1195 = torch.aten.sub.Tensor %1190, %result1_1294, %int1_1297 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%1196 = torch.aten.mul.Tensor %1195, %1194 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.weight : tensor<640xf16> | |
%1197 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%1198 = torch.aten.mul.Tensor %1196, %1197 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.bias : tensor<640xf16> | |
%1199 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm1.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_1298 = torch.constant.int 1 | |
%1200 = torch.aten.add.Tensor %1198, %1199, %int1_1298 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int5_1299 = torch.constant.int 5 | |
%1201 = torch.prims.convert_element_type %1200, %int5_1299 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int5_1300 = torch.constant.int 5 | |
%1202 = torch.prims.convert_element_type %result1_1294, %int5_1300 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int5_1301 = torch.constant.int 5 | |
%1203 = torch.prims.convert_element_type %1194, %int5_1301 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
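// Self-attention (attn1) of transformer_blocks.1: q, k and v are 640 -> 640 linear projections of the normed hidden states %1201.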
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight : tensor<640x640xf16> | |
%1204 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_q.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_1302 = torch.constant.int 0 | |
%int1_1303 = torch.constant.int 1 | |
%1205 = torch.aten.transpose.int %1204, %int0_1302, %int1_1303 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_1304 = torch.constant.int 8192 | |
%int640_1305 = torch.constant.int 640 | |
%1206 = torch.prim.ListConstruct %int8192_1304, %int640_1305 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1207 = torch.aten.view %1201, %1206 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%1208 = torch.aten.mm %1207, %1205 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_1306 = torch.constant.int 2 | |
%int4096_1307 = torch.constant.int 4096 | |
%int640_1308 = torch.constant.int 640 | |
%1209 = torch.prim.ListConstruct %int2_1306, %int4096_1307, %int640_1308 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1210 = torch.aten.view %1208, %1209 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight : tensor<640x640xf16> | |
%1211 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_k.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_1309 = torch.constant.int 0 | |
%int1_1310 = torch.constant.int 1 | |
%1212 = torch.aten.transpose.int %1211, %int0_1309, %int1_1310 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_1311 = torch.constant.int 8192 | |
%int640_1312 = torch.constant.int 640 | |
%1213 = torch.prim.ListConstruct %int8192_1311, %int640_1312 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1214 = torch.aten.view %1201, %1213 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%1215 = torch.aten.mm %1214, %1212 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_1313 = torch.constant.int 2 | |
%int4096_1314 = torch.constant.int 4096 | |
%int640_1315 = torch.constant.int 640 | |
%1216 = torch.prim.ListConstruct %int2_1313, %int4096_1314, %int640_1315 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1217 = torch.aten.view %1215, %1216 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight : tensor<640x640xf16> | |
%1218 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_v.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_1316 = torch.constant.int 0 | |
%int1_1317 = torch.constant.int 1 | |
%1219 = torch.aten.transpose.int %1218, %int0_1316, %int1_1317 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_1318 = torch.constant.int 8192 | |
%int640_1319 = torch.constant.int 640 | |
%1220 = torch.prim.ListConstruct %int8192_1318, %int640_1319 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1221 = torch.aten.view %1201, %1220 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%1222 = torch.aten.mm %1221, %1219 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_1320 = torch.constant.int 2 | |
%int4096_1321 = torch.constant.int 4096 | |
%int640_1322 = torch.constant.int 640 | |
%1223 = torch.prim.ListConstruct %int2_1320, %int4096_1321, %int640_1322 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1224 = torch.aten.view %1222, %1223 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
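// Split into attention heads: [2,4096,640] -> [2,4096,10,64], then transpose to [2,10,4096,64] (10 heads of width 64).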
%int2_1323 = torch.constant.int 2 | |
%int-1_1324 = torch.constant.int -1 | |
%int10_1325 = torch.constant.int 10 | |
%int64_1326 = torch.constant.int 64 | |
%1225 = torch.prim.ListConstruct %int2_1323, %int-1_1324, %int10_1325, %int64_1326 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1226 = torch.aten.view %1210, %1225 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_1327 = torch.constant.int 1 | |
%int2_1328 = torch.constant.int 2 | |
%1227 = torch.aten.transpose.int %1226, %int1_1327, %int2_1328 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%int2_1329 = torch.constant.int 2 | |
%int-1_1330 = torch.constant.int -1 | |
%int10_1331 = torch.constant.int 10 | |
%int64_1332 = torch.constant.int 64 | |
%1228 = torch.prim.ListConstruct %int2_1329, %int-1_1330, %int10_1331, %int64_1332 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1229 = torch.aten.view %1217, %1228 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_1333 = torch.constant.int 1 | |
%int2_1334 = torch.constant.int 2 | |
%1230 = torch.aten.transpose.int %1229, %int1_1333, %int2_1334 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%int2_1335 = torch.constant.int 2 | |
%int-1_1336 = torch.constant.int -1 | |
%int10_1337 = torch.constant.int 10 | |
%int64_1338 = torch.constant.int 64 | |
%1231 = torch.prim.ListConstruct %int2_1335, %int-1_1336, %int10_1337, %int64_1338 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1232 = torch.aten.view %1224, %1231 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_1339 = torch.constant.int 1 | |
%int2_1340 = torch.constant.int 2 | |
%1233 = torch.aten.transpose.int %1232, %int1_1339, %int2_1340 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%float0.000000e00_1341 = torch.constant.float 0.000000e+00 | |
%false_1342 = torch.constant.bool false | |
%none_1343 = torch.constant.none | |
%none_1344 = torch.constant.none | |
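// Flash-attention SDPA (CPU lowering) over the self-attention heads: dropout 0.0, non-causal; the second result is the per-head softmax logsumexp ([2,10,4096], f32).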
%1234:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%1227, %1230, %1233, %float0.000000e00_1341, %false_1342, %none_1343, %none_1344) : (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096],f32>) | |
%1235 = torch.aten.detach %1234#0 : !torch.vtensor<[2,10,4096,64],f16> -> !torch.vtensor<[2,10,4096,64],f16> | |
%int1_1345 = torch.constant.int 1 | |
%int2_1346 = torch.constant.int 2 | |
%1236 = torch.aten.transpose.int %1234#0, %int1_1345, %int2_1346 : !torch.vtensor<[2,10,4096,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,4096,10,64],f16> | |
%int2_1347 = torch.constant.int 2 | |
%int-1_1348 = torch.constant.int -1 | |
%int640_1349 = torch.constant.int 640 | |
%1237 = torch.prim.ListConstruct %int2_1347, %int-1_1348, %int640_1349 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1238 = torch.aten.view %1236, %1237 : !torch.vtensor<[2,4096,10,64],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int8192_1350 = torch.constant.int 8192 | |
%int640_1351 = torch.constant.int 640 | |
%1239 = torch.prim.ListConstruct %int8192_1350, %int640_1351 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1240 = torch.aten.view %1238, %1239 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight : tensor<640x640xf16> | |
%1241 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_1352 = torch.constant.int 0 | |
%int1_1353 = torch.constant.int 1 | |
%1242 = torch.aten.transpose.int %1241, %int0_1352, %int1_1353 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias : tensor<640xf16> | |
%1243 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_1354 = torch.constant.int 6 | |
%1244 = torch.prims.convert_element_type %1243, %int6_1354 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_1355 = torch.constant.int 6 | |
%1245 = torch.prims.convert_element_type %1240, %int6_1355 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_1356 = torch.constant.int 6 | |
%1246 = torch.prims.convert_element_type %1242, %int6_1356 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32> | |
%1247 = torch.aten.mm %1245, %1246 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_1357 = torch.constant.int 1 | |
%1248 = torch.aten.mul.Scalar %1247, %int1_1357 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_1358 = torch.constant.int 1 | |
%1249 = torch.aten.mul.Scalar %1244, %int1_1358 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_1359 = torch.constant.int 1 | |
%1250 = torch.aten.add.Tensor %1248, %1249, %int1_1359 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_1360 = torch.constant.int 5 | |
%1251 = torch.prims.convert_element_type %1250, %int5_1360 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_1361 = torch.constant.int 2 | |
%int4096_1362 = torch.constant.int 4096 | |
%int640_1363 = torch.constant.int 640 | |
%1252 = torch.prim.ListConstruct %int2_1361, %int4096_1362, %int640_1363 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1253 = torch.aten.view %1251, %1252 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%none_1364 = torch.constant.none | |
%1254 = torch.aten.clone %1253, %none_1364 : !torch.vtensor<[2,4096,640],f16>, !torch.none -> !torch.vtensor<[2,4096,640],f16> | |
%float1.000000e00_1365 = torch.constant.float 1.000000e+00 | |
%1255 = torch.aten.div.Scalar %1254, %float1.000000e00_1365 : !torch.vtensor<[2,4096,640],f16>, !torch.float -> !torch.vtensor<[2,4096,640],f16> | |
%int1_1366 = torch.constant.int 1 | |
%1256 = torch.aten.add.Tensor %1255, %1190, %int1_1366 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
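// Self-attention residual added; LayerNorm (norm2) below prepares the input for cross-attention.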
%int6_1367 = torch.constant.int 6 | |
%1257 = torch.prims.convert_element_type %1256, %int6_1367 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int2_1368 = torch.constant.int 2 | |
%1258 = torch.prim.ListConstruct %int2_1368 : (!torch.int) -> !torch.list<int> | |
%int0_1369 = torch.constant.int 0 | |
%true_1370 = torch.constant.bool true | |
%result0_1371, %result1_1372 = torch.aten.var_mean.correction %1257, %1258, %int0_1369, %true_1370 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32> | |
%float1.000000e-05_1373 = torch.constant.float 1.000000e-05 | |
%int1_1374 = torch.constant.int 1 | |
%1259 = torch.aten.add.Scalar %result0_1371, %float1.000000e-05_1373, %int1_1374 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32> | |
%1260 = torch.aten.rsqrt %1259 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32> | |
%int1_1375 = torch.constant.int 1 | |
%1261 = torch.aten.sub.Tensor %1256, %result1_1372, %int1_1375 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%1262 = torch.aten.mul.Tensor %1261, %1260 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.weight : tensor<640xf16> | |
%1263 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%1264 = torch.aten.mul.Tensor %1262, %1263 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.bias : tensor<640xf16> | |
%1265 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_1376 = torch.constant.int 1 | |
%1266 = torch.aten.add.Tensor %1264, %1265, %int1_1376 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int5_1377 = torch.constant.int 5 | |
%1267 = torch.prims.convert_element_type %1266, %int5_1377 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int5_1378 = torch.constant.int 5 | |
%1268 = torch.prims.convert_element_type %result1_1372, %int5_1378 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int5_1379 = torch.constant.int 5 | |
%1269 = torch.prims.convert_element_type %1260, %int5_1379 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
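// Cross-attention (attn2): queries come from the 4096 image tokens; keys/values are 2048 -> 640 projections of %arg1 ([2,64,2048], likely the text-encoder/prompt embedding).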
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight : tensor<640x640xf16> | |
%1270 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_1380 = torch.constant.int 0 | |
%int1_1381 = torch.constant.int 1 | |
%1271 = torch.aten.transpose.int %1270, %int0_1380, %int1_1381 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%int8192_1382 = torch.constant.int 8192 | |
%int640_1383 = torch.constant.int 640 | |
%1272 = torch.prim.ListConstruct %int8192_1382, %int640_1383 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1273 = torch.aten.view %1267, %1272 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%1274 = torch.aten.mm %1273, %1271 : !torch.vtensor<[8192,640],f16>, !torch.vtensor<[640,640],f16> -> !torch.vtensor<[8192,640],f16> | |
%int2_1384 = torch.constant.int 2 | |
%int4096_1385 = torch.constant.int 4096 | |
%int640_1386 = torch.constant.int 640 | |
%1275 = torch.prim.ListConstruct %int2_1384, %int4096_1385, %int640_1386 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1276 = torch.aten.view %1274, %1275 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight : tensor<640x2048xf16> | |
%1277 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_k.weight : tensor<640x2048xf16> -> !torch.vtensor<[640,2048],f16> | |
%int0_1387 = torch.constant.int 0 | |
%int1_1388 = torch.constant.int 1 | |
%1278 = torch.aten.transpose.int %1277, %int0_1387, %int1_1388 : !torch.vtensor<[640,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,640],f16> | |
%int128_1389 = torch.constant.int 128 | |
%int2048_1390 = torch.constant.int 2048 | |
%1279 = torch.prim.ListConstruct %int128_1389, %int2048_1390 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1280 = torch.aten.view %arg1, %1279 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%1281 = torch.aten.mm %1280, %1278 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,640],f16> -> !torch.vtensor<[128,640],f16> | |
%int2_1391 = torch.constant.int 2 | |
%int64_1392 = torch.constant.int 64 | |
%int640_1393 = torch.constant.int 640 | |
%1282 = torch.prim.ListConstruct %int2_1391, %int64_1392, %int640_1393 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1283 = torch.aten.view %1281, %1282 : !torch.vtensor<[128,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight : tensor<640x2048xf16> | |
%1284 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_v.weight : tensor<640x2048xf16> -> !torch.vtensor<[640,2048],f16> | |
%int0_1394 = torch.constant.int 0 | |
%int1_1395 = torch.constant.int 1 | |
%1285 = torch.aten.transpose.int %1284, %int0_1394, %int1_1395 : !torch.vtensor<[640,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,640],f16> | |
%int128_1396 = torch.constant.int 128 | |
%int2048_1397 = torch.constant.int 2048 | |
%1286 = torch.prim.ListConstruct %int128_1396, %int2048_1397 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1287 = torch.aten.view %arg1, %1286 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%1288 = torch.aten.mm %1287, %1285 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,640],f16> -> !torch.vtensor<[128,640],f16> | |
%int2_1398 = torch.constant.int 2 | |
%int64_1399 = torch.constant.int 64 | |
%int640_1400 = torch.constant.int 640 | |
%1289 = torch.prim.ListConstruct %int2_1398, %int64_1399, %int640_1400 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1290 = torch.aten.view %1288, %1289 : !torch.vtensor<[128,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,640],f16> | |
%int2_1401 = torch.constant.int 2 | |
%int-1_1402 = torch.constant.int -1 | |
%int10_1403 = torch.constant.int 10 | |
%int64_1404 = torch.constant.int 64 | |
%1291 = torch.prim.ListConstruct %int2_1401, %int-1_1402, %int10_1403, %int64_1404 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1292 = torch.aten.view %1276, %1291 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,10,64],f16> | |
%int1_1405 = torch.constant.int 1 | |
%int2_1406 = torch.constant.int 2 | |
%1293 = torch.aten.transpose.int %1292, %int1_1405, %int2_1406 : !torch.vtensor<[2,4096,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,4096,64],f16> | |
%int2_1407 = torch.constant.int 2 | |
%int-1_1408 = torch.constant.int -1 | |
%int10_1409 = torch.constant.int 10 | |
%int64_1410 = torch.constant.int 64 | |
%1294 = torch.prim.ListConstruct %int2_1407, %int-1_1408, %int10_1409, %int64_1410 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1295 = torch.aten.view %1283, %1294 : !torch.vtensor<[2,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,10,64],f16> | |
%int1_1411 = torch.constant.int 1 | |
%int2_1412 = torch.constant.int 2 | |
%1296 = torch.aten.transpose.int %1295, %int1_1411, %int2_1412 : !torch.vtensor<[2,64,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,64,64],f16> | |
%int2_1413 = torch.constant.int 2 | |
%int-1_1414 = torch.constant.int -1 | |
%int10_1415 = torch.constant.int 10 | |
%int64_1416 = torch.constant.int 64 | |
%1297 = torch.prim.ListConstruct %int2_1413, %int-1_1414, %int10_1415, %int64_1416 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1298 = torch.aten.view %1290, %1297 : !torch.vtensor<[2,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,10,64],f16> | |
%int1_1417 = torch.constant.int 1 | |
%int2_1418 = torch.constant.int 2 | |
%1299 = torch.aten.transpose.int %1298, %int1_1417, %int2_1418 : !torch.vtensor<[2,64,10,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,10,64,64],f16> | |
%float0.000000e00_1419 = torch.constant.float 0.000000e+00 | |
%false_1420 = torch.constant.bool false | |
%none_1421 = torch.constant.none | |
%none_1422 = torch.constant.none | |
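// Cross-attention SDPA: the 4096 image tokens attend to the 64 encoder tokens per head.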
%1300:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%1293, %1296, %1299, %float0.000000e00_1419, %false_1420, %none_1421, %none_1422) : (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,64,64],f16>, !torch.vtensor<[2,10,64,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,10,4096,64],f16>, !torch.vtensor<[2,10,4096],f32>) | |
%1301 = torch.aten.detach %1300#0 : !torch.vtensor<[2,10,4096,64],f16> -> !torch.vtensor<[2,10,4096,64],f16> | |
%int1_1423 = torch.constant.int 1 | |
%int2_1424 = torch.constant.int 2 | |
%1302 = torch.aten.transpose.int %1300#0, %int1_1423, %int2_1424 : !torch.vtensor<[2,10,4096,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,4096,10,64],f16> | |
%int2_1425 = torch.constant.int 2 | |
%int-1_1426 = torch.constant.int -1 | |
%int640_1427 = torch.constant.int 640 | |
%1303 = torch.prim.ListConstruct %int2_1425, %int-1_1426, %int640_1427 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1304 = torch.aten.view %1302, %1303 : !torch.vtensor<[2,4096,10,64],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int8192_1428 = torch.constant.int 8192 | |
%int640_1429 = torch.constant.int 640 | |
%1305 = torch.prim.ListConstruct %int8192_1428, %int640_1429 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1306 = torch.aten.view %1304, %1305 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight : tensor<640x640xf16> | |
%1307 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_1430 = torch.constant.int 0 | |
%int1_1431 = torch.constant.int 1 | |
%1308 = torch.aten.transpose.int %1307, %int0_1430, %int1_1431 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias : tensor<640xf16> | |
%1309 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_1432 = torch.constant.int 6 | |
%1310 = torch.prims.convert_element_type %1309, %int6_1432 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_1433 = torch.constant.int 6 | |
%1311 = torch.prims.convert_element_type %1306, %int6_1433 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_1434 = torch.constant.int 6 | |
%1312 = torch.prims.convert_element_type %1308, %int6_1434 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32> | |
%1313 = torch.aten.mm %1311, %1312 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_1435 = torch.constant.int 1 | |
%1314 = torch.aten.mul.Scalar %1313, %int1_1435 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_1436 = torch.constant.int 1 | |
%1315 = torch.aten.mul.Scalar %1310, %int1_1436 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_1437 = torch.constant.int 1 | |
%1316 = torch.aten.add.Tensor %1314, %1315, %int1_1437 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_1438 = torch.constant.int 5 | |
%1317 = torch.prims.convert_element_type %1316, %int5_1438 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_1439 = torch.constant.int 2 | |
%int4096_1440 = torch.constant.int 4096 | |
%int640_1441 = torch.constant.int 640 | |
%1318 = torch.prim.ListConstruct %int2_1439, %int4096_1440, %int640_1441 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1319 = torch.aten.view %1317, %1318 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%none_1442 = torch.constant.none | |
%1320 = torch.aten.clone %1319, %none_1442 : !torch.vtensor<[2,4096,640],f16>, !torch.none -> !torch.vtensor<[2,4096,640],f16> | |
%float1.000000e00_1443 = torch.constant.float 1.000000e+00 | |
%1321 = torch.aten.div.Scalar %1320, %float1.000000e00_1443 : !torch.vtensor<[2,4096,640],f16>, !torch.float -> !torch.vtensor<[2,4096,640],f16> | |
%int1_1444 = torch.constant.int 1 | |
%1322 = torch.aten.add.Tensor %1321, %1256, %int1_1444 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int6_1445 = torch.constant.int 6 | |
%1323 = torch.prims.convert_element_type %1322, %int6_1445 : !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int2_1446 = torch.constant.int 2 | |
%1324 = torch.prim.ListConstruct %int2_1446 : (!torch.int) -> !torch.list<int> | |
%int0_1447 = torch.constant.int 0 | |
%true_1448 = torch.constant.bool true | |
%result0_1449, %result1_1450 = torch.aten.var_mean.correction %1323, %1324, %int0_1447, %true_1448 : !torch.vtensor<[2,4096,640],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,4096,1],f32>, !torch.vtensor<[2,4096,1],f32> | |
%float1.000000e-05_1451 = torch.constant.float 1.000000e-05 | |
%int1_1452 = torch.constant.int 1 | |
%1325 = torch.aten.add.Scalar %result0_1449, %float1.000000e-05_1451, %int1_1452 : !torch.vtensor<[2,4096,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,4096,1],f32> | |
%1326 = torch.aten.rsqrt %1325 : !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,1],f32> | |
%int1_1453 = torch.constant.int 1 | |
%1327 = torch.aten.sub.Tensor %1322, %result1_1450, %int1_1453 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%1328 = torch.aten.mul.Tensor %1327, %1326 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[2,4096,1],f32> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.weight : tensor<640xf16> | |
%1329 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%1330 = torch.aten.mul.Tensor %1328, %1329 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16> -> !torch.vtensor<[2,4096,640],f32> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.bias : tensor<640xf16> | |
%1331 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.norm3.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int1_1454 = torch.constant.int 1 | |
%1332 = torch.aten.add.Tensor %1330, %1331, %int1_1454 : !torch.vtensor<[2,4096,640],f32>, !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f32> | |
%int5_1455 = torch.constant.int 5 | |
%1333 = torch.prims.convert_element_type %1332, %int5_1455 : !torch.vtensor<[2,4096,640],f32>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
%int5_1456 = torch.constant.int 5 | |
%1334 = torch.prims.convert_element_type %result1_1450, %int5_1456 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
%int5_1457 = torch.constant.int 5 | |
%1335 = torch.prims.convert_element_type %1326, %int5_1457 : !torch.vtensor<[2,4096,1],f32>, !torch.int -> !torch.vtensor<[2,4096,1],f16> | |
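// Feed-forward (GEGLU) of transformer_blocks.1: same 640 -> 5120 -> (2560, GELU-gated) -> 640 pattern as in transformer_blocks.0.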
%int8192_1458 = torch.constant.int 8192 | |
%int640_1459 = torch.constant.int 640 | |
%1336 = torch.prim.ListConstruct %int8192_1458, %int640_1459 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1337 = torch.aten.view %1333, %1336 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight : tensor<5120x640xf16> | |
%1338 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight : tensor<5120x640xf16> -> !torch.vtensor<[5120,640],f16> | |
%int0_1460 = torch.constant.int 0 | |
%int1_1461 = torch.constant.int 1 | |
%1339 = torch.aten.transpose.int %1338, %int0_1460, %int1_1461 : !torch.vtensor<[5120,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,5120],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias : tensor<5120xf16> | |
%1340 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias : tensor<5120xf16> -> !torch.vtensor<[5120],f16> | |
%int6_1462 = torch.constant.int 6 | |
%1341 = torch.prims.convert_element_type %1340, %int6_1462 : !torch.vtensor<[5120],f16>, !torch.int -> !torch.vtensor<[5120],f32> | |
%int6_1463 = torch.constant.int 6 | |
%1342 = torch.prims.convert_element_type %1337, %int6_1463 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_1464 = torch.constant.int 6 | |
%1343 = torch.prims.convert_element_type %1339, %int6_1464 : !torch.vtensor<[640,5120],f16>, !torch.int -> !torch.vtensor<[640,5120],f32> | |
%1344 = torch.aten.mm %1342, %1343 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,5120],f32> -> !torch.vtensor<[8192,5120],f32> | |
%int1_1465 = torch.constant.int 1 | |
%1345 = torch.aten.mul.Scalar %1344, %int1_1465 : !torch.vtensor<[8192,5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f32> | |
%int1_1466 = torch.constant.int 1 | |
%1346 = torch.aten.mul.Scalar %1341, %int1_1466 : !torch.vtensor<[5120],f32>, !torch.int -> !torch.vtensor<[5120],f32> | |
%int1_1467 = torch.constant.int 1 | |
%1347 = torch.aten.add.Tensor %1345, %1346, %int1_1467 : !torch.vtensor<[8192,5120],f32>, !torch.vtensor<[5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f32> | |
%int5_1468 = torch.constant.int 5 | |
%1348 = torch.prims.convert_element_type %1347, %int5_1468 : !torch.vtensor<[8192,5120],f32>, !torch.int -> !torch.vtensor<[8192,5120],f16> | |
%int2_1469 = torch.constant.int 2 | |
%int4096_1470 = torch.constant.int 4096 | |
%int5120_1471 = torch.constant.int 5120 | |
%1349 = torch.prim.ListConstruct %int2_1469, %int4096_1470, %int5120_1471 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1350 = torch.aten.view %1348, %1349 : !torch.vtensor<[8192,5120],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,5120],f16> | |
%int-1_1472 = torch.constant.int -1 | |
%int0_1473 = torch.constant.int 0 | |
%int2560_1474 = torch.constant.int 2560 | |
%int1_1475 = torch.constant.int 1 | |
%1351 = torch.aten.slice.Tensor %1350, %int-1_1472, %int0_1473, %int2560_1474, %int1_1475 : !torch.vtensor<[2,4096,5120],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,4096,2560],f16> | |
%int-1_1476 = torch.constant.int -1 | |
%int2560_1477 = torch.constant.int 2560 | |
%int5120_1478 = torch.constant.int 5120 | |
%int1_1479 = torch.constant.int 1 | |
%1352 = torch.aten.slice.Tensor %1350, %int-1_1476, %int2560_1477, %int5120_1478, %int1_1479 : !torch.vtensor<[2,4096,5120],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,4096,2560],f16> | |
%str_1480 = torch.constant.str "none" | |
%1353 = torch.aten.gelu %1352, %str_1480 : !torch.vtensor<[2,4096,2560],f16>, !torch.str -> !torch.vtensor<[2,4096,2560],f16> | |
%1354 = torch.aten.mul.Tensor %1351, %1353 : !torch.vtensor<[2,4096,2560],f16>, !torch.vtensor<[2,4096,2560],f16> -> !torch.vtensor<[2,4096,2560],f16> | |
%none_1481 = torch.constant.none | |
%1355 = torch.aten.clone %1354, %none_1481 : !torch.vtensor<[2,4096,2560],f16>, !torch.none -> !torch.vtensor<[2,4096,2560],f16> | |
%int8192_1482 = torch.constant.int 8192 | |
%int2560_1483 = torch.constant.int 2560 | |
%1356 = torch.prim.ListConstruct %int8192_1482, %int2560_1483 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1357 = torch.aten.view %1355, %1356 : !torch.vtensor<[2,4096,2560],f16>, !torch.list<int> -> !torch.vtensor<[8192,2560],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight : tensor<640x2560xf16> | |
%1358 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight : tensor<640x2560xf16> -> !torch.vtensor<[640,2560],f16> | |
%int0_1484 = torch.constant.int 0 | |
%int1_1485 = torch.constant.int 1 | |
%1359 = torch.aten.transpose.int %1358, %int0_1484, %int1_1485 : !torch.vtensor<[640,2560],f16>, !torch.int, !torch.int -> !torch.vtensor<[2560,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias : tensor<640xf16> | |
%1360 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_1486 = torch.constant.int 6 | |
%1361 = torch.prims.convert_element_type %1360, %int6_1486 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_1487 = torch.constant.int 6 | |
%1362 = torch.prims.convert_element_type %1357, %int6_1487 : !torch.vtensor<[8192,2560],f16>, !torch.int -> !torch.vtensor<[8192,2560],f32> | |
%int6_1488 = torch.constant.int 6 | |
%1363 = torch.prims.convert_element_type %1359, %int6_1488 : !torch.vtensor<[2560,640],f16>, !torch.int -> !torch.vtensor<[2560,640],f32> | |
%1364 = torch.aten.mm %1362, %1363 : !torch.vtensor<[8192,2560],f32>, !torch.vtensor<[2560,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_1489 = torch.constant.int 1 | |
%1365 = torch.aten.mul.Scalar %1364, %int1_1489 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_1490 = torch.constant.int 1 | |
%1366 = torch.aten.mul.Scalar %1361, %int1_1490 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_1491 = torch.constant.int 1 | |
%1367 = torch.aten.add.Tensor %1365, %1366, %int1_1491 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_1492 = torch.constant.int 5 | |
%1368 = torch.prims.convert_element_type %1367, %int5_1492 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_1493 = torch.constant.int 2 | |
%int4096_1494 = torch.constant.int 4096 | |
%int640_1495 = torch.constant.int 640 | |
%1369 = torch.prim.ListConstruct %int2_1493, %int4096_1494, %int640_1495 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1370 = torch.aten.view %1368, %1369 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int1_1496 = torch.constant.int 1 | |
%1371 = torch.aten.add.Tensor %1370, %1322, %int1_1496 : !torch.vtensor<[2,4096,640],f16>, !torch.vtensor<[2,4096,640],f16>, !torch.int -> !torch.vtensor<[2,4096,640],f16> | |
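// proj_out of down_blocks.1.attentions.1: 640 -> 640 linear, reshape [2,4096,640] -> [2,64,64,640], permute to NCHW [2,640,64,64], and add the pre-attention residual %962.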
%int8192_1497 = torch.constant.int 8192 | |
%int640_1498 = torch.constant.int 640 | |
%1372 = torch.prim.ListConstruct %int8192_1497, %int640_1498 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1373 = torch.aten.view %1371, %1372 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[8192,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.proj_out.weight = util.global.load @_params.unet.down_blocks.1.attentions.1.proj_out.weight : tensor<640x640xf16> | |
%1374 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.proj_out.weight : tensor<640x640xf16> -> !torch.vtensor<[640,640],f16> | |
%int0_1499 = torch.constant.int 0 | |
%int1_1500 = torch.constant.int 1 | |
%1375 = torch.aten.transpose.int %1374, %int0_1499, %int1_1500 : !torch.vtensor<[640,640],f16>, !torch.int, !torch.int -> !torch.vtensor<[640,640],f16> | |
%_params.unet.down_blocks.1.attentions.1.proj_out.bias = util.global.load @_params.unet.down_blocks.1.attentions.1.proj_out.bias : tensor<640xf16> | |
%1376 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.attentions.1.proj_out.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int6_1501 = torch.constant.int 6 | |
%1377 = torch.prims.convert_element_type %1376, %int6_1501 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[640],f32> | |
%int6_1502 = torch.constant.int 6 | |
%1378 = torch.prims.convert_element_type %1373, %int6_1502 : !torch.vtensor<[8192,640],f16>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int6_1503 = torch.constant.int 6 | |
%1379 = torch.prims.convert_element_type %1375, %int6_1503 : !torch.vtensor<[640,640],f16>, !torch.int -> !torch.vtensor<[640,640],f32> | |
%1380 = torch.aten.mm %1378, %1379 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640,640],f32> -> !torch.vtensor<[8192,640],f32> | |
%int1_1504 = torch.constant.int 1 | |
%1381 = torch.aten.mul.Scalar %1380, %int1_1504 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int1_1505 = torch.constant.int 1 | |
%1382 = torch.aten.mul.Scalar %1377, %int1_1505 : !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[640],f32> | |
%int1_1506 = torch.constant.int 1 | |
%1383 = torch.aten.add.Tensor %1381, %1382, %int1_1506 : !torch.vtensor<[8192,640],f32>, !torch.vtensor<[640],f32>, !torch.int -> !torch.vtensor<[8192,640],f32> | |
%int5_1507 = torch.constant.int 5 | |
%1384 = torch.prims.convert_element_type %1383, %int5_1507 : !torch.vtensor<[8192,640],f32>, !torch.int -> !torch.vtensor<[8192,640],f16> | |
%int2_1508 = torch.constant.int 2 | |
%int4096_1509 = torch.constant.int 4096 | |
%int640_1510 = torch.constant.int 640 | |
%1385 = torch.prim.ListConstruct %int2_1508, %int4096_1509, %int640_1510 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1386 = torch.aten.view %1384, %1385 : !torch.vtensor<[8192,640],f16>, !torch.list<int> -> !torch.vtensor<[2,4096,640],f16> | |
%int2_1511 = torch.constant.int 2 | |
%int64_1512 = torch.constant.int 64 | |
%int64_1513 = torch.constant.int 64 | |
%int640_1514 = torch.constant.int 640 | |
%1387 = torch.prim.ListConstruct %int2_1511, %int64_1512, %int64_1513, %int640_1514 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1388 = torch.aten.view %1386, %1387 : !torch.vtensor<[2,4096,640],f16>, !torch.list<int> -> !torch.vtensor<[2,64,64,640],f16> | |
%int0_1515 = torch.constant.int 0 | |
%int3_1516 = torch.constant.int 3 | |
%int1_1517 = torch.constant.int 1 | |
%int2_1518 = torch.constant.int 2 | |
%1389 = torch.prim.ListConstruct %int0_1515, %int3_1516, %int1_1517, %int2_1518 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1390 = torch.aten.permute %1388, %1389 : !torch.vtensor<[2,64,64,640],f16>, !torch.list<int> -> !torch.vtensor<[2,640,64,64],f16> | |
%int0_1519 = torch.constant.int 0 | |
%1391 = torch.aten.clone %1390, %int0_1519 : !torch.vtensor<[2,640,64,64],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
%int1_1520 = torch.constant.int 1 | |
%1392 = torch.aten.add.Tensor %1391, %962, %int1_1520 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[2,640,64,64],f16>, !torch.int -> !torch.vtensor<[2,640,64,64],f16> | |
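// down_blocks.1 downsampler: 3x3 convolution, stride 2, padding 1: [2,640,64,64] -> [2,640,32,32].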
%_params.unet.down_blocks.1.downsamplers.0.conv.weight = util.global.load @_params.unet.down_blocks.1.downsamplers.0.conv.weight : tensor<640x640x3x3xf16> | |
%1393 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.downsamplers.0.conv.weight : tensor<640x640x3x3xf16> -> !torch.vtensor<[640,640,3,3],f16> | |
%_params.unet.down_blocks.1.downsamplers.0.conv.bias = util.global.load @_params.unet.down_blocks.1.downsamplers.0.conv.bias : tensor<640xf16> | |
%1394 = torch_c.from_builtin_tensor %_params.unet.down_blocks.1.downsamplers.0.conv.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int2_1521 = torch.constant.int 2 | |
%int2_1522 = torch.constant.int 2 | |
%1395 = torch.prim.ListConstruct %int2_1521, %int2_1522 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1523 = torch.constant.int 1 | |
%int1_1524 = torch.constant.int 1 | |
%1396 = torch.prim.ListConstruct %int1_1523, %int1_1524 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1525 = torch.constant.int 1 | |
%int1_1526 = torch.constant.int 1 | |
%1397 = torch.prim.ListConstruct %int1_1525, %int1_1526 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1527 = torch.constant.bool false | |
%int0_1528 = torch.constant.int 0 | |
%int0_1529 = torch.constant.int 0 | |
%1398 = torch.prim.ListConstruct %int0_1528, %int0_1529 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1530 = torch.constant.int 1 | |
%1399 = torch.aten.convolution %1392, %1393, %1394, %1395, %1396, %1397, %false_1527, %1398, %int1_1530 : !torch.vtensor<[2,640,64,64],f16>, !torch.vtensor<[640,640,3,3],f16>, !torch.vtensor<[640],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,640,32,32],f16> | |
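// GroupNorm for down_blocks.2.resnets.0.norm1: 32 groups of 20 channels; reshape to [2,32,20,1024], take var/mean over dims [2,3] in f32, then scale/shift with the norm1 weight/bias.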
%int2_1531 = torch.constant.int 2 | |
%int32_1532 = torch.constant.int 32 | |
%int20_1533 = torch.constant.int 20 | |
%int1024 = torch.constant.int 1024 | |
%1400 = torch.prim.ListConstruct %int2_1531, %int32_1532, %int20_1533, %int1024 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1401 = torch.aten.view %1399, %1400 : !torch.vtensor<[2,640,32,32],f16>, !torch.list<int> -> !torch.vtensor<[2,32,20,1024],f16> | |
%int6_1534 = torch.constant.int 6 | |
%1402 = torch.prims.convert_element_type %1401, %int6_1534 : !torch.vtensor<[2,32,20,1024],f16>, !torch.int -> !torch.vtensor<[2,32,20,1024],f32> | |
%int2_1535 = torch.constant.int 2 | |
%int3_1536 = torch.constant.int 3 | |
%1403 = torch.prim.ListConstruct %int2_1535, %int3_1536 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_1537 = torch.constant.int 0 | |
%true_1538 = torch.constant.bool true | |
%result0_1539, %result1_1540 = torch.aten.var_mean.correction %1402, %1403, %int0_1537, %true_1538 : !torch.vtensor<[2,32,20,1024],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float1.000000e-05_1541 = torch.constant.float 1.000000e-05 | |
%int1_1542 = torch.constant.int 1 | |
%1404 = torch.aten.add.Scalar %result0_1539, %float1.000000e-05_1541, %int1_1542 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%1405 = torch.aten.rsqrt %1404 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_1543 = torch.constant.int 1 | |
%1406 = torch.aten.sub.Tensor %1401, %result1_1540, %int1_1543 : !torch.vtensor<[2,32,20,1024],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,20,1024],f32> | |
%1407 = torch.aten.mul.Tensor %1406, %1405 : !torch.vtensor<[2,32,20,1024],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,20,1024],f32> | |
%int2_1544 = torch.constant.int 2 | |
%int640_1545 = torch.constant.int 640 | |
%int32_1546 = torch.constant.int 32 | |
%int32_1547 = torch.constant.int 32 | |
%1408 = torch.prim.ListConstruct %int2_1544, %int640_1545, %int32_1546, %int32_1547 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1409 = torch.aten.view %1407, %1408 : !torch.vtensor<[2,32,20,1024],f32>, !torch.list<int> -> !torch.vtensor<[2,640,32,32],f32> | |
%_params.unet.down_blocks.2.resnets.0.norm1.bias = util.global.load @_params.unet.down_blocks.2.resnets.0.norm1.bias : tensor<640xf16> | |
%1410 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.norm1.bias : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int0_1548 = torch.constant.int 0 | |
%1411 = torch.aten.unsqueeze %1410, %int0_1548 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16> | |
%int2_1549 = torch.constant.int 2 | |
%1412 = torch.aten.unsqueeze %1411, %int2_1549 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16> | |
%int3_1550 = torch.constant.int 3 | |
%1413 = torch.aten.unsqueeze %1412, %int3_1550 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16> | |
%_params.unet.down_blocks.2.resnets.0.norm1.weight = util.global.load @_params.unet.down_blocks.2.resnets.0.norm1.weight : tensor<640xf16> | |
%1414 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.norm1.weight : tensor<640xf16> -> !torch.vtensor<[640],f16> | |
%int0_1551 = torch.constant.int 0 | |
%1415 = torch.aten.unsqueeze %1414, %int0_1551 : !torch.vtensor<[640],f16>, !torch.int -> !torch.vtensor<[1,640],f16> | |
%int2_1552 = torch.constant.int 2 | |
%1416 = torch.aten.unsqueeze %1415, %int2_1552 : !torch.vtensor<[1,640],f16>, !torch.int -> !torch.vtensor<[1,640,1],f16> | |
%int3_1553 = torch.constant.int 3 | |
%1417 = torch.aten.unsqueeze %1416, %int3_1553 : !torch.vtensor<[1,640,1],f16>, !torch.int -> !torch.vtensor<[1,640,1,1],f16> | |
%1418 = torch.aten.mul.Tensor %1409, %1417 : !torch.vtensor<[2,640,32,32],f32>, !torch.vtensor<[1,640,1,1],f16> -> !torch.vtensor<[2,640,32,32],f32> | |
%int1_1554 = torch.constant.int 1 | |
%1419 = torch.aten.add.Tensor %1418, %1413, %int1_1554 : !torch.vtensor<[2,640,32,32],f32>, !torch.vtensor<[1,640,1,1],f16>, !torch.int -> !torch.vtensor<[2,640,32,32],f32> | |
%int5_1555 = torch.constant.int 5 | |
%1420 = torch.prims.convert_element_type %1419, %int5_1555 : !torch.vtensor<[2,640,32,32],f32>, !torch.int -> !torch.vtensor<[2,640,32,32],f16> | |
%int5_1556 = torch.constant.int 5 | |
%1421 = torch.prims.convert_element_type %result1_1540, %int5_1556 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_1557 = torch.constant.int 5 | |
%1422 = torch.prims.convert_element_type %1405, %int5_1557 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_1558 = torch.constant.int 3 | |
%1423 = torch.prim.ListConstruct %int3_1558 : (!torch.int) -> !torch.list<int> | |
%1424 = torch.prims.squeeze %1421, %1423 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_1559 = torch.constant.int 2 | |
%1425 = torch.prim.ListConstruct %int2_1559 : (!torch.int) -> !torch.list<int> | |
%1426 = torch.prims.squeeze %1424, %1425 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_1560 = torch.constant.int 3 | |
%1427 = torch.prim.ListConstruct %int3_1560 : (!torch.int) -> !torch.list<int> | |
%1428 = torch.prims.squeeze %1422, %1427 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_1561 = torch.constant.int 2 | |
%1429 = torch.prim.ListConstruct %int2_1561 : (!torch.int) -> !torch.list<int> | |
%1430 = torch.prims.squeeze %1428, %1429 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%1431 = torch.aten.detach %1426 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%1432 = torch.aten.detach %1430 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
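// The convert/squeeze/detach chain above (%1421-%1432) materializes the saved
// per-group mean and rstd as [2,32] f16 tensors, mirroring native_group_norm's
// extra outputs; at inference these appear to go unused downstream.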
%1433 = torch.aten.silu %1420 : !torch.vtensor<[2,640,32,32],f16> -> !torch.vtensor<[2,640,32,32],f16> | |
%_params.unet.down_blocks.2.resnets.0.conv1.weight = util.global.load @_params.unet.down_blocks.2.resnets.0.conv1.weight : tensor<1280x640x3x3xf16> | |
%1434 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.conv1.weight : tensor<1280x640x3x3xf16> -> !torch.vtensor<[1280,640,3,3],f16> | |
%_params.unet.down_blocks.2.resnets.0.conv1.bias = util.global.load @_params.unet.down_blocks.2.resnets.0.conv1.bias : tensor<1280xf16> | |
%1435 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.conv1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_1562 = torch.constant.int 1 | |
%int1_1563 = torch.constant.int 1 | |
%1436 = torch.prim.ListConstruct %int1_1562, %int1_1563 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1564 = torch.constant.int 1 | |
%int1_1565 = torch.constant.int 1 | |
%1437 = torch.prim.ListConstruct %int1_1564, %int1_1565 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1566 = torch.constant.int 1 | |
%int1_1567 = torch.constant.int 1 | |
%1438 = torch.prim.ListConstruct %int1_1566, %int1_1567 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1568 = torch.constant.bool false | |
%int0_1569 = torch.constant.int 0 | |
%int0_1570 = torch.constant.int 0 | |
%1439 = torch.prim.ListConstruct %int0_1569, %int0_1570 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1571 = torch.constant.int 1 | |
%1440 = torch.aten.convolution %1433, %1434, %1435, %1436, %1437, %1438, %false_1568, %1439, %int1_1571 : !torch.vtensor<[2,640,32,32],f16>, !torch.vtensor<[1280,640,3,3],f16>, !torch.vtensor<[1280],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,1280,32,32],f16> | |
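// SiLU then conv1: 3x3, stride 1, padding 1, widening 640 -> 1280 channels
// ([2,640,32,32] -> [2,1280,32,32]).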
%1441 = torch.aten.silu %103 : !torch.vtensor<[2,1280],f16> -> !torch.vtensor<[2,1280],f16> | |
%_params.unet.down_blocks.2.resnets.0.time_emb_proj.weight = util.global.load @_params.unet.down_blocks.2.resnets.0.time_emb_proj.weight : tensor<1280x1280xf16> | |
%1442 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.time_emb_proj.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_1572 = torch.constant.int 0 | |
%int1_1573 = torch.constant.int 1 | |
%1443 = torch.aten.transpose.int %1442, %int0_1572, %int1_1573 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%_params.unet.down_blocks.2.resnets.0.time_emb_proj.bias = util.global.load @_params.unet.down_blocks.2.resnets.0.time_emb_proj.bias : tensor<1280xf16> | |
%1444 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.time_emb_proj.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_1574 = torch.constant.int 6 | |
%1445 = torch.prims.convert_element_type %1444, %int6_1574 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_1575 = torch.constant.int 6 | |
%1446 = torch.prims.convert_element_type %1441, %int6_1575 : !torch.vtensor<[2,1280],f16>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int6_1576 = torch.constant.int 6 | |
%1447 = torch.prims.convert_element_type %1443, %int6_1576 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32> | |
%1448 = torch.aten.mm %1446, %1447 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2,1280],f32> | |
%int1_1577 = torch.constant.int 1 | |
%1449 = torch.aten.mul.Scalar %1448, %int1_1577 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int1_1578 = torch.constant.int 1 | |
%1450 = torch.aten.mul.Scalar %1445, %int1_1578 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_1579 = torch.constant.int 1 | |
%1451 = torch.aten.add.Tensor %1449, %1450, %int1_1579 : !torch.vtensor<[2,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f32> | |
%int5_1580 = torch.constant.int 5 | |
%1452 = torch.prims.convert_element_type %1451, %int5_1580 : !torch.vtensor<[2,1280],f32>, !torch.int -> !torch.vtensor<[2,1280],f16> | |
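// Per-resnet time-embedding projection: SiLU on the shared [2,1280] embedding
// (%103), then time_emb_proj as an f32 matmul + bias, cast back to f16. The two
// mul.Scalar-by-1 ops look like the alpha/beta of a decomposed addmm and are no-ops.
// Sketch:
//   temb = time_emb_proj(F.silu(temb_shared))  # Linear(1280, 1280), computed in f32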
%int0_1581 = torch.constant.int 0 | |
%int0_1582 = torch.constant.int 0 | |
%int9223372036854775807_1583 = torch.constant.int 9223372036854775807 | |
%int1_1584 = torch.constant.int 1 | |
%1453 = torch.aten.slice.Tensor %1452, %int0_1581, %int0_1582, %int9223372036854775807_1583, %int1_1584 : !torch.vtensor<[2,1280],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1280],f16> | |
%int1_1585 = torch.constant.int 1 | |
%int0_1586 = torch.constant.int 0 | |
%int9223372036854775807_1587 = torch.constant.int 9223372036854775807 | |
%int1_1588 = torch.constant.int 1 | |
%1454 = torch.aten.slice.Tensor %1453, %int1_1585, %int0_1586, %int9223372036854775807_1587, %int1_1588 : !torch.vtensor<[2,1280],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1280],f16> | |
%int2_1589 = torch.constant.int 2 | |
%1455 = torch.aten.unsqueeze %1454, %int2_1589 : !torch.vtensor<[2,1280],f16>, !torch.int -> !torch.vtensor<[2,1280,1],f16> | |
%int3_1590 = torch.constant.int 3 | |
%1456 = torch.aten.unsqueeze %1455, %int3_1590 : !torch.vtensor<[2,1280,1],f16>, !torch.int -> !torch.vtensor<[2,1280,1,1],f16> | |
%int1_1591 = torch.constant.int 1 | |
%1457 = torch.aten.add.Tensor %1440, %1456, %int1_1591 : !torch.vtensor<[2,1280,32,32],f16>, !torch.vtensor<[2,1280,1,1],f16>, !torch.int -> !torch.vtensor<[2,1280,32,32],f16> | |
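// The two full-range slices above are no-ops; the projected temb is unsqueezed to
// [2,1280,1,1] and broadcast-added to the conv1 output.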
%int2_1592 = torch.constant.int 2 | |
%int32_1593 = torch.constant.int 32 | |
%int40 = torch.constant.int 40 | |
%int1024_1594 = torch.constant.int 1024 | |
%1458 = torch.prim.ListConstruct %int2_1592, %int32_1593, %int40, %int1024_1594 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1459 = torch.aten.view %1457, %1458 : !torch.vtensor<[2,1280,32,32],f16>, !torch.list<int> -> !torch.vtensor<[2,32,40,1024],f16> | |
%int6_1595 = torch.constant.int 6 | |
%1460 = torch.prims.convert_element_type %1459, %int6_1595 : !torch.vtensor<[2,32,40,1024],f16>, !torch.int -> !torch.vtensor<[2,32,40,1024],f32> | |
%int2_1596 = torch.constant.int 2 | |
%int3_1597 = torch.constant.int 3 | |
%1461 = torch.prim.ListConstruct %int2_1596, %int3_1597 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_1598 = torch.constant.int 0 | |
%true_1599 = torch.constant.bool true | |
%result0_1600, %result1_1601 = torch.aten.var_mean.correction %1460, %1461, %int0_1598, %true_1599 : !torch.vtensor<[2,32,40,1024],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float1.000000e-05_1602 = torch.constant.float 1.000000e-05 | |
%int1_1603 = torch.constant.int 1 | |
%1462 = torch.aten.add.Scalar %result0_1600, %float1.000000e-05_1602, %int1_1603 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%1463 = torch.aten.rsqrt %1462 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_1604 = torch.constant.int 1 | |
%1464 = torch.aten.sub.Tensor %1459, %result1_1601, %int1_1604 : !torch.vtensor<[2,32,40,1024],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,40,1024],f32> | |
%1465 = torch.aten.mul.Tensor %1464, %1463 : !torch.vtensor<[2,32,40,1024],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,40,1024],f32> | |
%int2_1605 = torch.constant.int 2 | |
%int1280 = torch.constant.int 1280 | |
%int32_1606 = torch.constant.int 32 | |
%int32_1607 = torch.constant.int 32 | |
%1466 = torch.prim.ListConstruct %int2_1605, %int1280, %int32_1606, %int32_1607 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1467 = torch.aten.view %1465, %1466 : !torch.vtensor<[2,32,40,1024],f32>, !torch.list<int> -> !torch.vtensor<[2,1280,32,32],f32> | |
%_params.unet.down_blocks.2.resnets.0.norm2.bias = util.global.load @_params.unet.down_blocks.2.resnets.0.norm2.bias : tensor<1280xf16> | |
%1468 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.norm2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int0_1608 = torch.constant.int 0 | |
%1469 = torch.aten.unsqueeze %1468, %int0_1608 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1,1280],f16> | |
%int2_1609 = torch.constant.int 2 | |
%1470 = torch.aten.unsqueeze %1469, %int2_1609 : !torch.vtensor<[1,1280],f16>, !torch.int -> !torch.vtensor<[1,1280,1],f16> | |
%int3_1610 = torch.constant.int 3 | |
%1471 = torch.aten.unsqueeze %1470, %int3_1610 : !torch.vtensor<[1,1280,1],f16>, !torch.int -> !torch.vtensor<[1,1280,1,1],f16> | |
%_params.unet.down_blocks.2.resnets.0.norm2.weight = util.global.load @_params.unet.down_blocks.2.resnets.0.norm2.weight : tensor<1280xf16> | |
%1472 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.norm2.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int0_1611 = torch.constant.int 0 | |
%1473 = torch.aten.unsqueeze %1472, %int0_1611 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1,1280],f16> | |
%int2_1612 = torch.constant.int 2 | |
%1474 = torch.aten.unsqueeze %1473, %int2_1612 : !torch.vtensor<[1,1280],f16>, !torch.int -> !torch.vtensor<[1,1280,1],f16> | |
%int3_1613 = torch.constant.int 3 | |
%1475 = torch.aten.unsqueeze %1474, %int3_1613 : !torch.vtensor<[1,1280,1],f16>, !torch.int -> !torch.vtensor<[1,1280,1,1],f16> | |
%1476 = torch.aten.mul.Tensor %1467, %1475 : !torch.vtensor<[2,1280,32,32],f32>, !torch.vtensor<[1,1280,1,1],f16> -> !torch.vtensor<[2,1280,32,32],f32> | |
%int1_1614 = torch.constant.int 1 | |
%1477 = torch.aten.add.Tensor %1476, %1471, %int1_1614 : !torch.vtensor<[2,1280,32,32],f32>, !torch.vtensor<[1,1280,1,1],f16>, !torch.int -> !torch.vtensor<[2,1280,32,32],f32> | |
%int5_1615 = torch.constant.int 5 | |
%1478 = torch.prims.convert_element_type %1477, %int5_1615 : !torch.vtensor<[2,1280,32,32],f32>, !torch.int -> !torch.vtensor<[2,1280,32,32],f16> | |
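// norm2: the same GroupNorm pattern as above, now 32 groups x 40 channels over the
// 1280-channel tensor (eps = 1e-5), followed by the norm2 weight/bias affine and a
// cast back to f16.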
%int5_1616 = torch.constant.int 5 | |
%1479 = torch.prims.convert_element_type %result1_1601, %int5_1616 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_1617 = torch.constant.int 5 | |
%1480 = torch.prims.convert_element_type %1463, %int5_1617 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_1618 = torch.constant.int 3 | |
%1481 = torch.prim.ListConstruct %int3_1618 : (!torch.int) -> !torch.list<int> | |
%1482 = torch.prims.squeeze %1479, %1481 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_1619 = torch.constant.int 2 | |
%1483 = torch.prim.ListConstruct %int2_1619 : (!torch.int) -> !torch.list<int> | |
%1484 = torch.prims.squeeze %1482, %1483 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_1620 = torch.constant.int 3 | |
%1485 = torch.prim.ListConstruct %int3_1620 : (!torch.int) -> !torch.list<int> | |
%1486 = torch.prims.squeeze %1480, %1485 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_1621 = torch.constant.int 2 | |
%1487 = torch.prim.ListConstruct %int2_1621 : (!torch.int) -> !torch.list<int> | |
%1488 = torch.prims.squeeze %1486, %1487 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%1489 = torch.aten.detach %1484 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%1490 = torch.aten.detach %1488 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%1491 = torch.aten.silu %1478 : !torch.vtensor<[2,1280,32,32],f16> -> !torch.vtensor<[2,1280,32,32],f16> | |
%none_1622 = torch.constant.none | |
%1492 = torch.aten.clone %1491, %none_1622 : !torch.vtensor<[2,1280,32,32],f16>, !torch.none -> !torch.vtensor<[2,1280,32,32],f16> | |
%_params.unet.down_blocks.2.resnets.0.conv2.weight = util.global.load @_params.unet.down_blocks.2.resnets.0.conv2.weight : tensor<1280x1280x3x3xf16> | |
%1493 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.conv2.weight : tensor<1280x1280x3x3xf16> -> !torch.vtensor<[1280,1280,3,3],f16> | |
%_params.unet.down_blocks.2.resnets.0.conv2.bias = util.global.load @_params.unet.down_blocks.2.resnets.0.conv2.bias : tensor<1280xf16> | |
%1494 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.conv2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_1623 = torch.constant.int 1 | |
%int1_1624 = torch.constant.int 1 | |
%1495 = torch.prim.ListConstruct %int1_1623, %int1_1624 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1625 = torch.constant.int 1 | |
%int1_1626 = torch.constant.int 1 | |
%1496 = torch.prim.ListConstruct %int1_1625, %int1_1626 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1627 = torch.constant.int 1 | |
%int1_1628 = torch.constant.int 1 | |
%1497 = torch.prim.ListConstruct %int1_1627, %int1_1628 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1629 = torch.constant.bool false | |
%int0_1630 = torch.constant.int 0 | |
%int0_1631 = torch.constant.int 0 | |
%1498 = torch.prim.ListConstruct %int0_1630, %int0_1631 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1632 = torch.constant.int 1 | |
%1499 = torch.aten.convolution %1492, %1493, %1494, %1495, %1496, %1497, %false_1629, %1498, %int1_1632 : !torch.vtensor<[2,1280,32,32],f16>, !torch.vtensor<[1280,1280,3,3],f16>, !torch.vtensor<[1280],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,1280,32,32],f16> | |
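// SiLU then conv2 (3x3, stride 1, padding 1, 1280 -> 1280). The clone between them
// is likely an identity left behind by export/functionalization (e.g. eval-mode
// dropout) and copies the tensor unchanged.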
%_params.unet.down_blocks.2.resnets.0.conv_shortcut.weight = util.global.load @_params.unet.down_blocks.2.resnets.0.conv_shortcut.weight : tensor<1280x640x1x1xf16> | |
%1500 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.conv_shortcut.weight : tensor<1280x640x1x1xf16> -> !torch.vtensor<[1280,640,1,1],f16> | |
%_params.unet.down_blocks.2.resnets.0.conv_shortcut.bias = util.global.load @_params.unet.down_blocks.2.resnets.0.conv_shortcut.bias : tensor<1280xf16> | |
%1501 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.resnets.0.conv_shortcut.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_1633 = torch.constant.int 1 | |
%int1_1634 = torch.constant.int 1 | |
%1502 = torch.prim.ListConstruct %int1_1633, %int1_1634 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_1635 = torch.constant.int 0 | |
%int0_1636 = torch.constant.int 0 | |
%1503 = torch.prim.ListConstruct %int0_1635, %int0_1636 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1637 = torch.constant.int 1 | |
%int1_1638 = torch.constant.int 1 | |
%1504 = torch.prim.ListConstruct %int1_1637, %int1_1638 : (!torch.int, !torch.int) -> !torch.list<int> | |
%false_1639 = torch.constant.bool false | |
%int0_1640 = torch.constant.int 0 | |
%int0_1641 = torch.constant.int 0 | |
%1505 = torch.prim.ListConstruct %int0_1640, %int0_1641 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int1_1642 = torch.constant.int 1 | |
%1506 = torch.aten.convolution %1399, %1500, %1501, %1502, %1503, %1504, %false_1639, %1505, %int1_1642 : !torch.vtensor<[2,640,32,32],f16>, !torch.vtensor<[1280,640,1,1],f16>, !torch.vtensor<[1280],f16>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[2,1280,32,32],f16> | |
%int1_1643 = torch.constant.int 1 | |
%1507 = torch.aten.add.Tensor %1506, %1499, %int1_1643 : !torch.vtensor<[2,1280,32,32],f16>, !torch.vtensor<[2,1280,32,32],f16>, !torch.int -> !torch.vtensor<[2,1280,32,32],f16> | |
%float1.000000e00_1644 = torch.constant.float 1.000000e+00 | |
%1508 = torch.aten.div.Scalar %1507, %float1.000000e00_1644 : !torch.vtensor<[2,1280,32,32],f16>, !torch.float -> !torch.vtensor<[2,1280,32,32],f16> | |
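// conv_shortcut (1x1, 640 -> 1280) projects the block input %1399 so it can be
// added to the conv2 output, closing the resnet. The final div by 1.0 matches
// diffusers' output_scale_factor (default 1.0) and is a no-op here. In effect:
//   out = (conv_shortcut(x) + h) / 1.0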
%int2_1645 = torch.constant.int 2 | |
%int32_1646 = torch.constant.int 32 | |
%int40_1647 = torch.constant.int 40 | |
%int1024_1648 = torch.constant.int 1024 | |
%1509 = torch.prim.ListConstruct %int2_1645, %int32_1646, %int40_1647, %int1024_1648 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1510 = torch.aten.view %1508, %1509 : !torch.vtensor<[2,1280,32,32],f16>, !torch.list<int> -> !torch.vtensor<[2,32,40,1024],f16> | |
%int6_1649 = torch.constant.int 6 | |
%1511 = torch.prims.convert_element_type %1510, %int6_1649 : !torch.vtensor<[2,32,40,1024],f16>, !torch.int -> !torch.vtensor<[2,32,40,1024],f32> | |
%int2_1650 = torch.constant.int 2 | |
%int3_1651 = torch.constant.int 3 | |
%1512 = torch.prim.ListConstruct %int2_1650, %int3_1651 : (!torch.int, !torch.int) -> !torch.list<int> | |
%int0_1652 = torch.constant.int 0 | |
%true_1653 = torch.constant.bool true | |
%result0_1654, %result1_1655 = torch.aten.var_mean.correction %1511, %1512, %int0_1652, %true_1653 : !torch.vtensor<[2,32,40,1024],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,32,1,1],f32>, !torch.vtensor<[2,32,1,1],f32> | |
%float9.999990e-07_1656 = torch.constant.float 9.9999999999999995E-7 | |
%int1_1657 = torch.constant.int 1 | |
%1513 = torch.aten.add.Scalar %result0_1654, %float9.999990e-07_1656, %int1_1657 : !torch.vtensor<[2,32,1,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,32,1,1],f32> | |
%1514 = torch.aten.rsqrt %1513 : !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,1,1],f32> | |
%int1_1658 = torch.constant.int 1 | |
%1515 = torch.aten.sub.Tensor %1510, %result1_1655, %int1_1658 : !torch.vtensor<[2,32,40,1024],f16>, !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,40,1024],f32> | |
%1516 = torch.aten.mul.Tensor %1515, %1514 : !torch.vtensor<[2,32,40,1024],f32>, !torch.vtensor<[2,32,1,1],f32> -> !torch.vtensor<[2,32,40,1024],f32> | |
%int2_1659 = torch.constant.int 2 | |
%int1280_1660 = torch.constant.int 1280 | |
%int32_1661 = torch.constant.int 32 | |
%int32_1662 = torch.constant.int 32 | |
%1517 = torch.prim.ListConstruct %int2_1659, %int1280_1660, %int32_1661, %int32_1662 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1518 = torch.aten.view %1516, %1517 : !torch.vtensor<[2,32,40,1024],f32>, !torch.list<int> -> !torch.vtensor<[2,1280,32,32],f32> | |
%_params.unet.down_blocks.2.attentions.0.norm.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.norm.bias : tensor<1280xf16> | |
%1519 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.norm.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int0_1663 = torch.constant.int 0 | |
%1520 = torch.aten.unsqueeze %1519, %int0_1663 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1,1280],f16> | |
%int2_1664 = torch.constant.int 2 | |
%1521 = torch.aten.unsqueeze %1520, %int2_1664 : !torch.vtensor<[1,1280],f16>, !torch.int -> !torch.vtensor<[1,1280,1],f16> | |
%int3_1665 = torch.constant.int 3 | |
%1522 = torch.aten.unsqueeze %1521, %int3_1665 : !torch.vtensor<[1,1280,1],f16>, !torch.int -> !torch.vtensor<[1,1280,1,1],f16> | |
%_params.unet.down_blocks.2.attentions.0.norm.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.norm.weight : tensor<1280xf16> | |
%1523 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.norm.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int0_1666 = torch.constant.int 0 | |
%1524 = torch.aten.unsqueeze %1523, %int0_1666 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1,1280],f16> | |
%int2_1667 = torch.constant.int 2 | |
%1525 = torch.aten.unsqueeze %1524, %int2_1667 : !torch.vtensor<[1,1280],f16>, !torch.int -> !torch.vtensor<[1,1280,1],f16> | |
%int3_1668 = torch.constant.int 3 | |
%1526 = torch.aten.unsqueeze %1525, %int3_1668 : !torch.vtensor<[1,1280,1],f16>, !torch.int -> !torch.vtensor<[1,1280,1,1],f16> | |
%1527 = torch.aten.mul.Tensor %1518, %1526 : !torch.vtensor<[2,1280,32,32],f32>, !torch.vtensor<[1,1280,1,1],f16> -> !torch.vtensor<[2,1280,32,32],f32> | |
%int1_1669 = torch.constant.int 1 | |
%1528 = torch.aten.add.Tensor %1527, %1522, %int1_1669 : !torch.vtensor<[2,1280,32,32],f32>, !torch.vtensor<[1,1280,1,1],f16>, !torch.int -> !torch.vtensor<[2,1280,32,32],f32> | |
%int5_1670 = torch.constant.int 5 | |
%1529 = torch.prims.convert_element_type %1528, %int5_1670 : !torch.vtensor<[2,1280,32,32],f32>, !torch.int -> !torch.vtensor<[2,1280,32,32],f16> | |
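// down_blocks.2.attentions.0: the spatial transformer opens with its own GroupNorm
// (32 groups x 40 channels), but note the smaller epsilon here, ~1e-6, versus the
// 1e-5 used in the resnet norms.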
%int5_1671 = torch.constant.int 5 | |
%1530 = torch.prims.convert_element_type %result1_1655, %int5_1671 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int5_1672 = torch.constant.int 5 | |
%1531 = torch.prims.convert_element_type %1514, %int5_1672 : !torch.vtensor<[2,32,1,1],f32>, !torch.int -> !torch.vtensor<[2,32,1,1],f16> | |
%int3_1673 = torch.constant.int 3 | |
%1532 = torch.prim.ListConstruct %int3_1673 : (!torch.int) -> !torch.list<int> | |
%1533 = torch.prims.squeeze %1530, %1532 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_1674 = torch.constant.int 2 | |
%1534 = torch.prim.ListConstruct %int2_1674 : (!torch.int) -> !torch.list<int> | |
%1535 = torch.prims.squeeze %1533, %1534 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%int3_1675 = torch.constant.int 3 | |
%1536 = torch.prim.ListConstruct %int3_1675 : (!torch.int) -> !torch.list<int> | |
%1537 = torch.prims.squeeze %1531, %1536 : !torch.vtensor<[2,32,1,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32,1],f16> | |
%int2_1676 = torch.constant.int 2 | |
%1538 = torch.prim.ListConstruct %int2_1676 : (!torch.int) -> !torch.list<int> | |
%1539 = torch.prims.squeeze %1537, %1538 : !torch.vtensor<[2,32,1],f16>, !torch.list<int> -> !torch.vtensor<[2,32],f16> | |
%1540 = torch.aten.detach %1535 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%1541 = torch.aten.detach %1539 : !torch.vtensor<[2,32],f16> -> !torch.vtensor<[2,32],f16> | |
%int0_1677 = torch.constant.int 0 | |
%int2_1678 = torch.constant.int 2 | |
%int3_1679 = torch.constant.int 3 | |
%int1_1680 = torch.constant.int 1 | |
%1542 = torch.prim.ListConstruct %int0_1677, %int2_1678, %int3_1679, %int1_1680 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1543 = torch.aten.permute %1529, %1542 : !torch.vtensor<[2,1280,32,32],f16>, !torch.list<int> -> !torch.vtensor<[2,32,32,1280],f16> | |
%int2_1681 = torch.constant.int 2 | |
%int1024_1682 = torch.constant.int 1024 | |
%int1280_1683 = torch.constant.int 1280 | |
%1544 = torch.prim.ListConstruct %int2_1681, %int1024_1682, %int1280_1683 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1545 = torch.aten.view %1543, %1544 : !torch.vtensor<[2,32,32,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
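// Layout change for attention: permute NCHW -> NHWC and flatten the 32x32 grid into
// 1024 tokens, giving [2,1024,1280].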
%_params.unet.down_blocks.2.attentions.0.proj_in.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.proj_in.weight : tensor<1280x1280xf16> | |
%1546 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.proj_in.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_1684 = torch.constant.int 0 | |
%int1_1685 = torch.constant.int 1 | |
%1547 = torch.aten.transpose.int %1546, %int0_1684, %int1_1685 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%int0_1686 = torch.constant.int 0 | |
%1548 = torch.aten.clone %1545, %int0_1686 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int2048_1687 = torch.constant.int 2048 | |
%int1280_1688 = torch.constant.int 1280 | |
%1549 = torch.prim.ListConstruct %int2048_1687, %int1280_1688 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1550 = torch.aten._unsafe_view %1548, %1549 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%1551 = torch.aten.mm %1550, %1547 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16> | |
%int2_1689 = torch.constant.int 2 | |
%int1024_1690 = torch.constant.int 1024 | |
%int1280_1691 = torch.constant.int 1280 | |
%1552 = torch.prim.ListConstruct %int2_1689, %int1024_1690, %int1280_1691 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1553 = torch.aten.view %1551, %1552 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.proj_in.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.proj_in.bias : tensor<1280xf16> | |
%1554 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.proj_in.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_1692 = torch.constant.int 1 | |
%1555 = torch.aten.add.Tensor %1553, %1554, %int1_1692 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
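// proj_in is applied as Linear(1280, 1280) on the flattened tokens (matmul over a
// [2048,1280] view plus bias), consistent with use_linear_projection=True in the
// SDXL transformer blocks.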
%int6_1693 = torch.constant.int 6 | |
%1556 = torch.prims.convert_element_type %1555, %int6_1693 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int2_1694 = torch.constant.int 2 | |
%1557 = torch.prim.ListConstruct %int2_1694 : (!torch.int) -> !torch.list<int> | |
%int0_1695 = torch.constant.int 0 | |
%true_1696 = torch.constant.bool true | |
%result0_1697, %result1_1698 = torch.aten.var_mean.correction %1556, %1557, %int0_1695, %true_1696 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32> | |
%float1.000000e-05_1699 = torch.constant.float 1.000000e-05 | |
%int1_1700 = torch.constant.int 1 | |
%1558 = torch.aten.add.Scalar %result0_1697, %float1.000000e-05_1699, %int1_1700 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32> | |
%1559 = torch.aten.rsqrt %1558 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32> | |
%int1_1701 = torch.constant.int 1 | |
%1560 = torch.aten.sub.Tensor %1555, %result1_1698, %int1_1701 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%1561 = torch.aten.mul.Tensor %1560, %1559 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.weight : tensor<1280xf16> | |
%1562 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%1563 = torch.aten.mul.Tensor %1561, %1562 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.bias : tensor<1280xf16> | |
%1564 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_1702 = torch.constant.int 1 | |
%1565 = torch.aten.add.Tensor %1563, %1564, %int1_1702 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int5_1703 = torch.constant.int 5 | |
%1566 = torch.prims.convert_element_type %1565, %int5_1703 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int5_1704 = torch.constant.int 5 | |
%1567 = torch.prims.convert_element_type %result1_1698, %int5_1704 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%int5_1705 = torch.constant.int 5 | |
%1568 = torch.prims.convert_element_type %1559, %int5_1705 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
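// transformer_blocks.0.norm1: LayerNorm over the last (1280) dim, computed in f32
// via var_mean (eps = 1e-5) with the usual affine, then cast back to f16. Sketch:
//   h = F.layer_norm(x.float(), (1280,), norm1.weight, norm1.bias, eps=1e-5).half()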
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight : tensor<1280x1280xf16> | |
%1569 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_1706 = torch.constant.int 0 | |
%int1_1707 = torch.constant.int 1 | |
%1570 = torch.aten.transpose.int %1569, %int0_1706, %int1_1707 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%int2048_1708 = torch.constant.int 2048 | |
%int1280_1709 = torch.constant.int 1280 | |
%1571 = torch.prim.ListConstruct %int2048_1708, %int1280_1709 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1572 = torch.aten.view %1566, %1571 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%1573 = torch.aten.mm %1572, %1570 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16> | |
%int2_1710 = torch.constant.int 2 | |
%int1024_1711 = torch.constant.int 1024 | |
%int1280_1712 = torch.constant.int 1280 | |
%1574 = torch.prim.ListConstruct %int2_1710, %int1024_1711, %int1280_1712 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1575 = torch.aten.view %1573, %1574 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight : tensor<1280x1280xf16> | |
%1576 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_k.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_1713 = torch.constant.int 0 | |
%int1_1714 = torch.constant.int 1 | |
%1577 = torch.aten.transpose.int %1576, %int0_1713, %int1_1714 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%int2048_1715 = torch.constant.int 2048 | |
%int1280_1716 = torch.constant.int 1280 | |
%1578 = torch.prim.ListConstruct %int2048_1715, %int1280_1716 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1579 = torch.aten.view %1566, %1578 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%1580 = torch.aten.mm %1579, %1577 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16> | |
%int2_1717 = torch.constant.int 2 | |
%int1024_1718 = torch.constant.int 1024 | |
%int1280_1719 = torch.constant.int 1280 | |
%1581 = torch.prim.ListConstruct %int2_1717, %int1024_1718, %int1280_1719 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1582 = torch.aten.view %1580, %1581 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight : tensor<1280x1280xf16> | |
%1583 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_v.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_1720 = torch.constant.int 0 | |
%int1_1721 = torch.constant.int 1 | |
%1584 = torch.aten.transpose.int %1583, %int0_1720, %int1_1721 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%int2048_1722 = torch.constant.int 2048 | |
%int1280_1723 = torch.constant.int 1280 | |
%1585 = torch.prim.ListConstruct %int2048_1722, %int1280_1723 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1586 = torch.aten.view %1566, %1585 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%1587 = torch.aten.mm %1586, %1584 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16> | |
%int2_1724 = torch.constant.int 2 | |
%int1024_1725 = torch.constant.int 1024 | |
%int1280_1726 = torch.constant.int 1280 | |
%1588 = torch.prim.ListConstruct %int2_1724, %int1024_1725, %int1280_1726 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1589 = torch.aten.view %1587, %1588 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%int2_1727 = torch.constant.int 2 | |
%int-1_1728 = torch.constant.int -1 | |
%int20_1729 = torch.constant.int 20 | |
%int64_1730 = torch.constant.int 64 | |
%1590 = torch.prim.ListConstruct %int2_1727, %int-1_1728, %int20_1729, %int64_1730 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1591 = torch.aten.view %1575, %1590 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16> | |
%int1_1731 = torch.constant.int 1 | |
%int2_1732 = torch.constant.int 2 | |
%1592 = torch.aten.transpose.int %1591, %int1_1731, %int2_1732 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16> | |
%int2_1733 = torch.constant.int 2 | |
%int-1_1734 = torch.constant.int -1 | |
%int20_1735 = torch.constant.int 20 | |
%int64_1736 = torch.constant.int 64 | |
%1593 = torch.prim.ListConstruct %int2_1733, %int-1_1734, %int20_1735, %int64_1736 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1594 = torch.aten.view %1582, %1593 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16> | |
%int1_1737 = torch.constant.int 1 | |
%int2_1738 = torch.constant.int 2 | |
%1595 = torch.aten.transpose.int %1594, %int1_1737, %int2_1738 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16> | |
%int2_1739 = torch.constant.int 2 | |
%int-1_1740 = torch.constant.int -1 | |
%int20_1741 = torch.constant.int 20 | |
%int64_1742 = torch.constant.int 64 | |
%1596 = torch.prim.ListConstruct %int2_1739, %int-1_1740, %int20_1741, %int64_1742 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1597 = torch.aten.view %1589, %1596 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16> | |
%int1_1743 = torch.constant.int 1 | |
%int2_1744 = torch.constant.int 2 | |
%1598 = torch.aten.transpose.int %1597, %int1_1743, %int2_1744 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16> | |
%float0.000000e00_1745 = torch.constant.float 0.000000e+00 | |
%false_1746 = torch.constant.bool false | |
%none_1747 = torch.constant.none | |
%none_1748 = torch.constant.none | |
%1599:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%1592, %1595, %1598, %float0.000000e00_1745, %false_1746, %none_1747, %none_1748) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>) | |
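// attn1 (self-attention): q, k, v above are [2,20,1024,64] (20 heads, head_dim 64);
// this is the CPU flash-attention kernel with dropout 0.0 and is_causal = false.
// Equivalent, as a sketch:
//   out = F.scaled_dot_product_attention(q, k, v)  # scale defaults to 1/sqrt(64)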
%1600 = torch.aten.detach %1599#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16> | |
%int1_1749 = torch.constant.int 1 | |
%int2_1750 = torch.constant.int 2 | |
%1601 = torch.aten.transpose.int %1599#0, %int1_1749, %int2_1750 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16> | |
%int2_1751 = torch.constant.int 2 | |
%int-1_1752 = torch.constant.int -1 | |
%int1280_1753 = torch.constant.int 1280 | |
%1602 = torch.prim.ListConstruct %int2_1751, %int-1_1752, %int1280_1753 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1603 = torch.aten.view %1601, %1602 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%int2048_1754 = torch.constant.int 2048 | |
%int1280_1755 = torch.constant.int 1280 | |
%1604 = torch.prim.ListConstruct %int2048_1754, %int1280_1755 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1605 = torch.aten.view %1603, %1604 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight : tensor<1280x1280xf16> | |
%1606 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_1756 = torch.constant.int 0 | |
%int1_1757 = torch.constant.int 1 | |
%1607 = torch.aten.transpose.int %1606, %int0_1756, %int1_1757 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias : tensor<1280xf16> | |
%1608 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_1758 = torch.constant.int 6 | |
%1609 = torch.prims.convert_element_type %1608, %int6_1758 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_1759 = torch.constant.int 6 | |
%1610 = torch.prims.convert_element_type %1605, %int6_1759 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int6_1760 = torch.constant.int 6 | |
%1611 = torch.prims.convert_element_type %1607, %int6_1760 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32> | |
%1612 = torch.aten.mm %1610, %1611 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32> | |
%int1_1761 = torch.constant.int 1 | |
%1613 = torch.aten.mul.Scalar %1612, %int1_1761 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int1_1762 = torch.constant.int 1 | |
%1614 = torch.aten.mul.Scalar %1609, %int1_1762 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_1763 = torch.constant.int 1 | |
%1615 = torch.aten.add.Tensor %1613, %1614, %int1_1763 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int5_1764 = torch.constant.int 5 | |
%1616 = torch.prims.convert_element_type %1615, %int5_1764 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int2_1765 = torch.constant.int 2 | |
%int1024_1766 = torch.constant.int 1024 | |
%int1280_1767 = torch.constant.int 1280 | |
%1617 = torch.prim.ListConstruct %int2_1765, %int1024_1766, %int1280_1767 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1618 = torch.aten.view %1616, %1617 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%none_1768 = torch.constant.none | |
%1619 = torch.aten.clone %1618, %none_1768 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16> | |
%float1.000000e00_1769 = torch.constant.float 1.000000e+00 | |
%1620 = torch.aten.div.Scalar %1619, %float1.000000e00_1769 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16> | |
%int1_1770 = torch.constant.int 1 | |
%1621 = torch.aten.add.Tensor %1620, %1555, %int1_1770 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
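// attn1 epilogue: heads are merged back to [2,1024,1280], to_out.0 is applied as an
// f32 matmul + bias, the div by 1.0 is a no-op rescale, and the result is added
// residually to the pre-norm hidden states %1555.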
%int6_1771 = torch.constant.int 6 | |
%1622 = torch.prims.convert_element_type %1621, %int6_1771 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int2_1772 = torch.constant.int 2 | |
%1623 = torch.prim.ListConstruct %int2_1772 : (!torch.int) -> !torch.list<int> | |
%int0_1773 = torch.constant.int 0 | |
%true_1774 = torch.constant.bool true | |
%result0_1775, %result1_1776 = torch.aten.var_mean.correction %1622, %1623, %int0_1773, %true_1774 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32> | |
%float1.000000e-05_1777 = torch.constant.float 1.000000e-05 | |
%int1_1778 = torch.constant.int 1 | |
%1624 = torch.aten.add.Scalar %result0_1775, %float1.000000e-05_1777, %int1_1778 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32> | |
%1625 = torch.aten.rsqrt %1624 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32> | |
%int1_1779 = torch.constant.int 1 | |
%1626 = torch.aten.sub.Tensor %1621, %result1_1776, %int1_1779 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%1627 = torch.aten.mul.Tensor %1626, %1625 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.weight : tensor<1280xf16> | |
%1628 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%1629 = torch.aten.mul.Tensor %1627, %1628 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.bias : tensor<1280xf16> | |
%1630 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_1780 = torch.constant.int 1 | |
%1631 = torch.aten.add.Tensor %1629, %1630, %int1_1780 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int5_1781 = torch.constant.int 5 | |
%1632 = torch.prims.convert_element_type %1631, %int5_1781 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int5_1782 = torch.constant.int 5 | |
%1633 = torch.prims.convert_element_type %result1_1776, %int5_1782 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%int5_1783 = torch.constant.int 5 | |
%1634 = torch.prims.convert_element_type %1625, %int5_1783 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight : tensor<1280x1280xf16> | |
%1635 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_1784 = torch.constant.int 0 | |
%int1_1785 = torch.constant.int 1 | |
%1636 = torch.aten.transpose.int %1635, %int0_1784, %int1_1785 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%int2048_1786 = torch.constant.int 2048 | |
%int1280_1787 = torch.constant.int 1280 | |
%1637 = torch.prim.ListConstruct %int2048_1786, %int1280_1787 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1638 = torch.aten.view %1632, %1637 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%1639 = torch.aten.mm %1638, %1636 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16> | |
%int2_1788 = torch.constant.int 2 | |
%int1024_1789 = torch.constant.int 1024 | |
%int1280_1790 = torch.constant.int 1280 | |
%1640 = torch.prim.ListConstruct %int2_1788, %int1024_1789, %int1280_1790 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1641 = torch.aten.view %1639, %1640 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight : tensor<1280x2048xf16> | |
%1642 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_k.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16> | |
%int0_1791 = torch.constant.int 0 | |
%int1_1792 = torch.constant.int 1 | |
%1643 = torch.aten.transpose.int %1642, %int0_1791, %int1_1792 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int128_1793 = torch.constant.int 128 | |
%int2048_1794 = torch.constant.int 2048 | |
%1644 = torch.prim.ListConstruct %int128_1793, %int2048_1794 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1645 = torch.aten.view %arg1, %1644 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%1646 = torch.aten.mm %1645, %1643 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16> | |
%int2_1795 = torch.constant.int 2 | |
%int64_1796 = torch.constant.int 64 | |
%int1280_1797 = torch.constant.int 1280 | |
%1647 = torch.prim.ListConstruct %int2_1795, %int64_1796, %int1280_1797 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1648 = torch.aten.view %1646, %1647 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight : tensor<1280x2048xf16> | |
%1649 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_v.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16> | |
%int0_1798 = torch.constant.int 0 | |
%int1_1799 = torch.constant.int 1 | |
%1650 = torch.aten.transpose.int %1649, %int0_1798, %int1_1799 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int128_1800 = torch.constant.int 128 | |
%int2048_1801 = torch.constant.int 2048 | |
%1651 = torch.prim.ListConstruct %int128_1800, %int2048_1801 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1652 = torch.aten.view %arg1, %1651 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%1653 = torch.aten.mm %1652, %1650 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16> | |
%int2_1802 = torch.constant.int 2 | |
%int64_1803 = torch.constant.int 64 | |
%int1280_1804 = torch.constant.int 1280 | |
%1654 = torch.prim.ListConstruct %int2_1802, %int64_1803, %int1280_1804 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1655 = torch.aten.view %1653, %1654 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16> | |
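// attn2 (cross-attention): the query comes from the image tokens, while to_k/to_v
// are Linear(2048, 1280) applied to %arg1, the [2,64,2048] text-encoder hidden
// states (64 prompt tokens; 2048 is SDXL's concatenated dual-CLIP width).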
%int2_1805 = torch.constant.int 2 | |
%int-1_1806 = torch.constant.int -1 | |
%int20_1807 = torch.constant.int 20 | |
%int64_1808 = torch.constant.int 64 | |
%1656 = torch.prim.ListConstruct %int2_1805, %int-1_1806, %int20_1807, %int64_1808 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1657 = torch.aten.view %1641, %1656 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16> | |
%int1_1809 = torch.constant.int 1 | |
%int2_1810 = torch.constant.int 2 | |
%1658 = torch.aten.transpose.int %1657, %int1_1809, %int2_1810 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16> | |
%int2_1811 = torch.constant.int 2 | |
%int-1_1812 = torch.constant.int -1 | |
%int20_1813 = torch.constant.int 20 | |
%int64_1814 = torch.constant.int 64 | |
%1659 = torch.prim.ListConstruct %int2_1811, %int-1_1812, %int20_1813, %int64_1814 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1660 = torch.aten.view %1648, %1659 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16> | |
%int1_1815 = torch.constant.int 1 | |
%int2_1816 = torch.constant.int 2 | |
%1661 = torch.aten.transpose.int %1660, %int1_1815, %int2_1816 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16> | |
%int2_1817 = torch.constant.int 2 | |
%int-1_1818 = torch.constant.int -1 | |
%int20_1819 = torch.constant.int 20 | |
%int64_1820 = torch.constant.int 64 | |
%1662 = torch.prim.ListConstruct %int2_1817, %int-1_1818, %int20_1819, %int64_1820 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1663 = torch.aten.view %1655, %1662 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16> | |
%int1_1821 = torch.constant.int 1 | |
%int2_1822 = torch.constant.int 2 | |
%1664 = torch.aten.transpose.int %1663, %int1_1821, %int2_1822 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16> | |
%float0.000000e00_1823 = torch.constant.float 0.000000e+00 | |
%false_1824 = torch.constant.bool false | |
%none_1825 = torch.constant.none | |
%none_1826 = torch.constant.none | |
%1665:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%1658, %1661, %1664, %float0.000000e00_1823, %false_1824, %none_1825, %none_1826) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>) | |
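// Cross-attention SDPA: the 1024 spatial queries attend over the 64 text tokens
// (k and v are [2,20,64,64]).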
%1666 = torch.aten.detach %1665#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16> | |
%int1_1827 = torch.constant.int 1 | |
%int2_1828 = torch.constant.int 2 | |
%1667 = torch.aten.transpose.int %1665#0, %int1_1827, %int2_1828 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16> | |
%int2_1829 = torch.constant.int 2 | |
%int-1_1830 = torch.constant.int -1 | |
%int1280_1831 = torch.constant.int 1280 | |
%1668 = torch.prim.ListConstruct %int2_1829, %int-1_1830, %int1280_1831 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1669 = torch.aten.view %1667, %1668 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%int2048_1832 = torch.constant.int 2048 | |
%int1280_1833 = torch.constant.int 1280 | |
%1670 = torch.prim.ListConstruct %int2048_1832, %int1280_1833 : (!torch.int, !torch.int) -> !torch.list<int> | |
%1671 = torch.aten.view %1669, %1670 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight : tensor<1280x1280xf16> | |
%1672 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_1834 = torch.constant.int 0 | |
%int1_1835 = torch.constant.int 1 | |
%1673 = torch.aten.transpose.int %1672, %int0_1834, %int1_1835 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias : tensor<1280xf16> | |
%1674 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_1836 = torch.constant.int 6 | |
%1675 = torch.prims.convert_element_type %1674, %int6_1836 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_1837 = torch.constant.int 6 | |
%1676 = torch.prims.convert_element_type %1671, %int6_1837 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int6_1838 = torch.constant.int 6 | |
%1677 = torch.prims.convert_element_type %1673, %int6_1838 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32> | |
%1678 = torch.aten.mm %1676, %1677 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32> | |
%int1_1839 = torch.constant.int 1 | |
%1679 = torch.aten.mul.Scalar %1678, %int1_1839 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int1_1840 = torch.constant.int 1 | |
%1680 = torch.aten.mul.Scalar %1675, %int1_1840 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_1841 = torch.constant.int 1 | |
%1681 = torch.aten.add.Tensor %1679, %1680, %int1_1841 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int5_1842 = torch.constant.int 5 | |
%1682 = torch.prims.convert_element_type %1681, %int5_1842 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int2_1843 = torch.constant.int 2 | |
%int1024_1844 = torch.constant.int 1024 | |
%int1280_1845 = torch.constant.int 1280 | |
%1683 = torch.prim.ListConstruct %int2_1843, %int1024_1844, %int1280_1845 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%1684 = torch.aten.view %1682, %1683 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%none_1846 = torch.constant.none | |
%1685 = torch.aten.clone %1684, %none_1846 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16> | |
%float1.000000e00_1847 = torch.constant.float 1.000000e+00 | |
%1686 = torch.aten.div.Scalar %1685, %float1.000000e00_1847 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16> | |
%int1_1848 = torch.constant.int 1 | |
%1687 = torch.aten.add.Tensor %1686, %1621, %int1_1848 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int6_1849 = torch.constant.int 6
%1688 = torch.prims.convert_element_type %1687, %int6_1849 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_1850 = torch.constant.int 2
%1689 = torch.prim.ListConstruct %int2_1850 : (!torch.int) -> !torch.list<int>
%int0_1851 = torch.constant.int 0
%true_1852 = torch.constant.bool true
%result0_1853, %result1_1854 = torch.aten.var_mean.correction %1688, %1689, %int0_1851, %true_1852 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_1855 = torch.constant.float 1.000000e-05
%int1_1856 = torch.constant.int 1
%1690 = torch.aten.add.Scalar %result0_1853, %float1.000000e-05_1855, %int1_1856 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%1691 = torch.aten.rsqrt %1690 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_1857 = torch.constant.int 1
%1692 = torch.aten.sub.Tensor %1687, %result1_1854, %int1_1857 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%1693 = torch.aten.mul.Tensor %1692, %1691 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.weight : tensor<1280xf16>
%1694 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%1695 = torch.aten.mul.Tensor %1693, %1694 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.bias : tensor<1280xf16>
%1696 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.norm3.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_1858 = torch.constant.int 1
%1697 = torch.aten.add.Tensor %1695, %1696, %int1_1858 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_1859 = torch.constant.int 5
%1698 = torch.prims.convert_element_type %1697, %int5_1859 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_1860 = torch.constant.int 5
%1699 = torch.prims.convert_element_type %result1_1854, %int5_1860 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_1861 = torch.constant.int 5
%1700 = torch.prims.convert_element_type %1691, %int5_1861 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
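// Feed-forward up-projection (ff.net.0.proj): flatten to [2048,1280], matmul against the transposed [10240,1280] weight in f32, add bias, truncate back to f16.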
%int2048_1862 = torch.constant.int 2048
%int1280_1863 = torch.constant.int 1280
%1701 = torch.prim.ListConstruct %int2048_1862, %int1280_1863 : (!torch.int, !torch.int) -> !torch.list<int>
%1702 = torch.aten.view %1698, %1701 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight : tensor<10240x1280xf16>
%1703 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight : tensor<10240x1280xf16> -> !torch.vtensor<[10240,1280],f16>
%int0_1864 = torch.constant.int 0
%int1_1865 = torch.constant.int 1
%1704 = torch.aten.transpose.int %1703, %int0_1864, %int1_1865 : !torch.vtensor<[10240,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,10240],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias : tensor<10240xf16>
%1705 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias : tensor<10240xf16> -> !torch.vtensor<[10240],f16>
%int6_1866 = torch.constant.int 6
%1706 = torch.prims.convert_element_type %1705, %int6_1866 : !torch.vtensor<[10240],f16>, !torch.int -> !torch.vtensor<[10240],f32>
%int6_1867 = torch.constant.int 6
%1707 = torch.prims.convert_element_type %1702, %int6_1867 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int6_1868 = torch.constant.int 6
%1708 = torch.prims.convert_element_type %1704, %int6_1868 : !torch.vtensor<[1280,10240],f16>, !torch.int -> !torch.vtensor<[1280,10240],f32>
%1709 = torch.aten.mm %1707, %1708 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,10240],f32> -> !torch.vtensor<[2048,10240],f32>
%int1_1869 = torch.constant.int 1
%1710 = torch.aten.mul.Scalar %1709, %int1_1869 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32>
%int1_1870 = torch.constant.int 1
%1711 = torch.aten.mul.Scalar %1706, %int1_1870 : !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[10240],f32>
%int1_1871 = torch.constant.int 1
%1712 = torch.aten.add.Tensor %1710, %1711, %int1_1871 : !torch.vtensor<[2048,10240],f32>, !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32>
%int5_1872 = torch.constant.int 5
%1713 = torch.prims.convert_element_type %1712, %int5_1872 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f16>
%int2_1873 = torch.constant.int 2
%int1024_1874 = torch.constant.int 1024
%int10240 = torch.constant.int 10240
%1714 = torch.prim.ListConstruct %int2_1873, %int1024_1874, %int10240 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1715 = torch.aten.view %1713, %1714 : !torch.vtensor<[2048,10240],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,10240],f16>
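// GEGLU-style gating (reading off the slice/gelu/mul pattern below): split the 10240-wide projection into two 5120-wide halves and scale one by gelu of the other.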
%int-1_1875 = torch.constant.int -1
%int0_1876 = torch.constant.int 0
%int5120_1877 = torch.constant.int 5120
%int1_1878 = torch.constant.int 1
%1716 = torch.aten.slice.Tensor %1715, %int-1_1875, %int0_1876, %int5120_1877, %int1_1878 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16>
%int-1_1879 = torch.constant.int -1
%int5120_1880 = torch.constant.int 5120
%int10240_1881 = torch.constant.int 10240
%int1_1882 = torch.constant.int 1
%1717 = torch.aten.slice.Tensor %1715, %int-1_1879, %int5120_1880, %int10240_1881, %int1_1882 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16>
%str_1883 = torch.constant.str "none"
%1718 = torch.aten.gelu %1717, %str_1883 : !torch.vtensor<[2,1024,5120],f16>, !torch.str -> !torch.vtensor<[2,1024,5120],f16>
%1719 = torch.aten.mul.Tensor %1716, %1718 : !torch.vtensor<[2,1024,5120],f16>, !torch.vtensor<[2,1024,5120],f16> -> !torch.vtensor<[2,1024,5120],f16>
%none_1884 = torch.constant.none
%1720 = torch.aten.clone %1719, %none_1884 : !torch.vtensor<[2,1024,5120],f16>, !torch.none -> !torch.vtensor<[2,1024,5120],f16>
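// Feed-forward down-projection (ff.net.2): [2048,5120] x [5120,1280] in f32, then reshape and residual add with %1687.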
%int2048_1885 = torch.constant.int 2048
%int5120_1886 = torch.constant.int 5120
%1721 = torch.prim.ListConstruct %int2048_1885, %int5120_1886 : (!torch.int, !torch.int) -> !torch.list<int>
%1722 = torch.aten.view %1720, %1721 : !torch.vtensor<[2,1024,5120],f16>, !torch.list<int> -> !torch.vtensor<[2048,5120],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight : tensor<1280x5120xf16>
%1723 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight : tensor<1280x5120xf16> -> !torch.vtensor<[1280,5120],f16>
%int0_1887 = torch.constant.int 0
%int1_1888 = torch.constant.int 1
%1724 = torch.aten.transpose.int %1723, %int0_1887, %int1_1888 : !torch.vtensor<[1280,5120],f16>, !torch.int, !torch.int -> !torch.vtensor<[5120,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias : tensor<1280xf16>
%1725 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int6_1889 = torch.constant.int 6
%1726 = torch.prims.convert_element_type %1725, %int6_1889 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
%int6_1890 = torch.constant.int 6
%1727 = torch.prims.convert_element_type %1722, %int6_1890 : !torch.vtensor<[2048,5120],f16>, !torch.int -> !torch.vtensor<[2048,5120],f32>
%int6_1891 = torch.constant.int 6
%1728 = torch.prims.convert_element_type %1724, %int6_1891 : !torch.vtensor<[5120,1280],f16>, !torch.int -> !torch.vtensor<[5120,1280],f32>
%1729 = torch.aten.mm %1727, %1728 : !torch.vtensor<[2048,5120],f32>, !torch.vtensor<[5120,1280],f32> -> !torch.vtensor<[2048,1280],f32>
%int1_1892 = torch.constant.int 1
%1730 = torch.aten.mul.Scalar %1729, %int1_1892 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int1_1893 = torch.constant.int 1
%1731 = torch.aten.mul.Scalar %1726, %int1_1893 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_1894 = torch.constant.int 1
%1732 = torch.aten.add.Tensor %1730, %1731, %int1_1894 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int5_1895 = torch.constant.int 5
%1733 = torch.prims.convert_element_type %1732, %int5_1895 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int2_1896 = torch.constant.int 2
%int1024_1897 = torch.constant.int 1024
%int1280_1898 = torch.constant.int 1280
%1734 = torch.prim.ListConstruct %int2_1896, %int1024_1897, %int1280_1898 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1735 = torch.aten.view %1733, %1734 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%int1_1899 = torch.constant.int 1
%1736 = torch.aten.add.Tensor %1735, %1687, %int1_1899 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
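// ---- transformer_blocks.1 ---- LayerNorm (norm1) ahead of self-attention.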
%int6_1900 = torch.constant.int 6
%1737 = torch.prims.convert_element_type %1736, %int6_1900 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_1901 = torch.constant.int 2
%1738 = torch.prim.ListConstruct %int2_1901 : (!torch.int) -> !torch.list<int>
%int0_1902 = torch.constant.int 0
%true_1903 = torch.constant.bool true
%result0_1904, %result1_1905 = torch.aten.var_mean.correction %1737, %1738, %int0_1902, %true_1903 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_1906 = torch.constant.float 1.000000e-05
%int1_1907 = torch.constant.int 1
%1739 = torch.aten.add.Scalar %result0_1904, %float1.000000e-05_1906, %int1_1907 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%1740 = torch.aten.rsqrt %1739 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_1908 = torch.constant.int 1
%1741 = torch.aten.sub.Tensor %1736, %result1_1905, %int1_1908 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%1742 = torch.aten.mul.Tensor %1741, %1740 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.weight : tensor<1280xf16>
%1743 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%1744 = torch.aten.mul.Tensor %1742, %1743 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.bias : tensor<1280xf16>
%1745 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_1909 = torch.constant.int 1
%1746 = torch.aten.add.Tensor %1744, %1745, %int1_1909 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_1910 = torch.constant.int 5
%1747 = torch.prims.convert_element_type %1746, %int5_1910 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_1911 = torch.constant.int 5
%1748 = torch.prims.convert_element_type %result1_1905, %int5_1911 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_1912 = torch.constant.int 5
%1749 = torch.prims.convert_element_type %1740, %int5_1912 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
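// Self-attention (attn1): Q/K/V projections of the normed hidden states, each a [2048,1280] x [1280,1280] f16 matmul.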
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight : tensor<1280x1280xf16>
%1750 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_1913 = torch.constant.int 0
%int1_1914 = torch.constant.int 1
%1751 = torch.aten.transpose.int %1750, %int0_1913, %int1_1914 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_1915 = torch.constant.int 2048
%int1280_1916 = torch.constant.int 1280
%1752 = torch.prim.ListConstruct %int2048_1915, %int1280_1916 : (!torch.int, !torch.int) -> !torch.list<int>
%1753 = torch.aten.view %1747, %1752 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%1754 = torch.aten.mm %1753, %1751 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_1917 = torch.constant.int 2
%int1024_1918 = torch.constant.int 1024
%int1280_1919 = torch.constant.int 1280
%1755 = torch.prim.ListConstruct %int2_1917, %int1024_1918, %int1280_1919 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1756 = torch.aten.view %1754, %1755 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight : tensor<1280x1280xf16>
%1757 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_k.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_1920 = torch.constant.int 0
%int1_1921 = torch.constant.int 1
%1758 = torch.aten.transpose.int %1757, %int0_1920, %int1_1921 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_1922 = torch.constant.int 2048
%int1280_1923 = torch.constant.int 1280
%1759 = torch.prim.ListConstruct %int2048_1922, %int1280_1923 : (!torch.int, !torch.int) -> !torch.list<int>
%1760 = torch.aten.view %1747, %1759 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%1761 = torch.aten.mm %1760, %1758 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_1924 = torch.constant.int 2
%int1024_1925 = torch.constant.int 1024
%int1280_1926 = torch.constant.int 1280
%1762 = torch.prim.ListConstruct %int2_1924, %int1024_1925, %int1280_1926 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1763 = torch.aten.view %1761, %1762 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight : tensor<1280x1280xf16>
%1764 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_v.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_1927 = torch.constant.int 0
%int1_1928 = torch.constant.int 1
%1765 = torch.aten.transpose.int %1764, %int0_1927, %int1_1928 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_1929 = torch.constant.int 2048
%int1280_1930 = torch.constant.int 1280
%1766 = torch.prim.ListConstruct %int2048_1929, %int1280_1930 : (!torch.int, !torch.int) -> !torch.list<int>
%1767 = torch.aten.view %1747, %1766 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%1768 = torch.aten.mm %1767, %1765 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_1931 = torch.constant.int 2
%int1024_1932 = torch.constant.int 1024
%int1280_1933 = torch.constant.int 1280
%1769 = torch.prim.ListConstruct %int2_1931, %int1024_1932, %int1280_1933 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1770 = torch.aten.view %1768, %1769 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
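// Split into 20 heads of width 64 and run scaled dot-product flash attention (CPU lowering; dropout 0.0, non-causal, no mask).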
%int2_1934 = torch.constant.int 2
%int-1_1935 = torch.constant.int -1
%int20_1936 = torch.constant.int 20
%int64_1937 = torch.constant.int 64
%1771 = torch.prim.ListConstruct %int2_1934, %int-1_1935, %int20_1936, %int64_1937 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1772 = torch.aten.view %1756, %1771 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_1938 = torch.constant.int 1
%int2_1939 = torch.constant.int 2
%1773 = torch.aten.transpose.int %1772, %int1_1938, %int2_1939 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_1940 = torch.constant.int 2
%int-1_1941 = torch.constant.int -1
%int20_1942 = torch.constant.int 20
%int64_1943 = torch.constant.int 64
%1774 = torch.prim.ListConstruct %int2_1940, %int-1_1941, %int20_1942, %int64_1943 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1775 = torch.aten.view %1763, %1774 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_1944 = torch.constant.int 1
%int2_1945 = torch.constant.int 2
%1776 = torch.aten.transpose.int %1775, %int1_1944, %int2_1945 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_1946 = torch.constant.int 2
%int-1_1947 = torch.constant.int -1
%int20_1948 = torch.constant.int 20
%int64_1949 = torch.constant.int 64
%1777 = torch.prim.ListConstruct %int2_1946, %int-1_1947, %int20_1948, %int64_1949 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1778 = torch.aten.view %1770, %1777 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_1950 = torch.constant.int 1
%int2_1951 = torch.constant.int 2
%1779 = torch.aten.transpose.int %1778, %int1_1950, %int2_1951 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%float0.000000e00_1952 = torch.constant.float 0.000000e+00
%false_1953 = torch.constant.bool false
%none_1954 = torch.constant.none
%none_1955 = torch.constant.none
%1780:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%1773, %1776, %1779, %float0.000000e00_1952, %false_1953, %none_1954, %none_1955) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>)
%1781 = torch.aten.detach %1780#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16>
%int1_1956 = torch.constant.int 1
%int2_1957 = torch.constant.int 2
%1782 = torch.aten.transpose.int %1780#0, %int1_1956, %int2_1957 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16>
%int2_1958 = torch.constant.int 2
%int-1_1959 = torch.constant.int -1
%int1280_1960 = torch.constant.int 1280
%1783 = torch.prim.ListConstruct %int2_1958, %int-1_1959, %int1280_1960 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1784 = torch.aten.view %1782, %1783 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%int2048_1961 = torch.constant.int 2048
%int1280_1962 = torch.constant.int 1280
%1785 = torch.prim.ListConstruct %int2048_1961, %int1280_1962 : (!torch.int, !torch.int) -> !torch.list<int>
%1786 = torch.aten.view %1784, %1785 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
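// Output projection (attn1.to_out.0) in f32, followed by what appears to be an inactive dropout (clone + divide by 1.0) and the residual add.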
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight : tensor<1280x1280xf16>
%1787 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_1963 = torch.constant.int 0
%int1_1964 = torch.constant.int 1
%1788 = torch.aten.transpose.int %1787, %int0_1963, %int1_1964 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias : tensor<1280xf16>
%1789 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int6_1965 = torch.constant.int 6
%1790 = torch.prims.convert_element_type %1789, %int6_1965 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
%int6_1966 = torch.constant.int 6
%1791 = torch.prims.convert_element_type %1786, %int6_1966 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int6_1967 = torch.constant.int 6
%1792 = torch.prims.convert_element_type %1788, %int6_1967 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32>
%1793 = torch.aten.mm %1791, %1792 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32>
%int1_1968 = torch.constant.int 1
%1794 = torch.aten.mul.Scalar %1793, %int1_1968 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int1_1969 = torch.constant.int 1
%1795 = torch.aten.mul.Scalar %1790, %int1_1969 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_1970 = torch.constant.int 1
%1796 = torch.aten.add.Tensor %1794, %1795, %int1_1970 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int5_1971 = torch.constant.int 5
%1797 = torch.prims.convert_element_type %1796, %int5_1971 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int2_1972 = torch.constant.int 2
%int1024_1973 = torch.constant.int 1024
%int1280_1974 = torch.constant.int 1280
%1798 = torch.prim.ListConstruct %int2_1972, %int1024_1973, %int1280_1974 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1799 = torch.aten.view %1797, %1798 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%none_1975 = torch.constant.none
%1800 = torch.aten.clone %1799, %none_1975 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16>
%float1.000000e00_1976 = torch.constant.float 1.000000e+00
%1801 = torch.aten.div.Scalar %1800, %float1.000000e00_1976 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16>
%int1_1977 = torch.constant.int 1
%1802 = torch.aten.add.Tensor %1801, %1736, %int1_1977 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
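// LayerNorm (norm2) ahead of cross-attention.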
%int6_1978 = torch.constant.int 6
%1803 = torch.prims.convert_element_type %1802, %int6_1978 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_1979 = torch.constant.int 2
%1804 = torch.prim.ListConstruct %int2_1979 : (!torch.int) -> !torch.list<int>
%int0_1980 = torch.constant.int 0
%true_1981 = torch.constant.bool true
%result0_1982, %result1_1983 = torch.aten.var_mean.correction %1803, %1804, %int0_1980, %true_1981 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_1984 = torch.constant.float 1.000000e-05
%int1_1985 = torch.constant.int 1
%1805 = torch.aten.add.Scalar %result0_1982, %float1.000000e-05_1984, %int1_1985 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%1806 = torch.aten.rsqrt %1805 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_1986 = torch.constant.int 1
%1807 = torch.aten.sub.Tensor %1802, %result1_1983, %int1_1986 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%1808 = torch.aten.mul.Tensor %1807, %1806 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.weight : tensor<1280xf16>
%1809 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%1810 = torch.aten.mul.Tensor %1808, %1809 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.bias : tensor<1280xf16>
%1811 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_1987 = torch.constant.int 1
%1812 = torch.aten.add.Tensor %1810, %1811, %int1_1987 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_1988 = torch.constant.int 5
%1813 = torch.prims.convert_element_type %1812, %int5_1988 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_1989 = torch.constant.int 5
%1814 = torch.prims.convert_element_type %result1_1983, %int5_1989 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_1990 = torch.constant.int 5
%1815 = torch.prims.convert_element_type %1806, %int5_1990 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
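// Cross-attention (attn2): Q comes from the hidden states; K and V are projected from the [2,64,2048] encoder hidden states (%arg1).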
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight : tensor<1280x1280xf16>
%1816 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_1991 = torch.constant.int 0
%int1_1992 = torch.constant.int 1
%1817 = torch.aten.transpose.int %1816, %int0_1991, %int1_1992 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_1993 = torch.constant.int 2048
%int1280_1994 = torch.constant.int 1280
%1818 = torch.prim.ListConstruct %int2048_1993, %int1280_1994 : (!torch.int, !torch.int) -> !torch.list<int>
%1819 = torch.aten.view %1813, %1818 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%1820 = torch.aten.mm %1819, %1817 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_1995 = torch.constant.int 2
%int1024_1996 = torch.constant.int 1024
%int1280_1997 = torch.constant.int 1280
%1821 = torch.prim.ListConstruct %int2_1995, %int1024_1996, %int1280_1997 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1822 = torch.aten.view %1820, %1821 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight : tensor<1280x2048xf16>
%1823 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_k.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16>
%int0_1998 = torch.constant.int 0
%int1_1999 = torch.constant.int 1
%1824 = torch.aten.transpose.int %1823, %int0_1998, %int1_1999 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int128_2000 = torch.constant.int 128
%int2048_2001 = torch.constant.int 2048
%1825 = torch.prim.ListConstruct %int128_2000, %int2048_2001 : (!torch.int, !torch.int) -> !torch.list<int>
%1826 = torch.aten.view %arg1, %1825 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16>
%1827 = torch.aten.mm %1826, %1824 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16>
%int2_2002 = torch.constant.int 2
%int64_2003 = torch.constant.int 64
%int1280_2004 = torch.constant.int 1280
%1828 = torch.prim.ListConstruct %int2_2002, %int64_2003, %int1280_2004 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1829 = torch.aten.view %1827, %1828 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight : tensor<1280x2048xf16>
%1830 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_v.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16>
%int0_2005 = torch.constant.int 0
%int1_2006 = torch.constant.int 1
%1831 = torch.aten.transpose.int %1830, %int0_2005, %int1_2006 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int128_2007 = torch.constant.int 128
%int2048_2008 = torch.constant.int 2048
%1832 = torch.prim.ListConstruct %int128_2007, %int2048_2008 : (!torch.int, !torch.int) -> !torch.list<int>
%1833 = torch.aten.view %arg1, %1832 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16>
%1834 = torch.aten.mm %1833, %1831 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16>
%int2_2009 = torch.constant.int 2
%int64_2010 = torch.constant.int 64
%int1280_2011 = torch.constant.int 1280
%1835 = torch.prim.ListConstruct %int2_2009, %int64_2010, %int1280_2011 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1836 = torch.aten.view %1834, %1835 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16>
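// Head split: Q is [2,20,1024,64] while K/V are [2,20,64,64], so 1024 image tokens attend over 64 text tokens.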
%int2_2012 = torch.constant.int 2
%int-1_2013 = torch.constant.int -1
%int20_2014 = torch.constant.int 20
%int64_2015 = torch.constant.int 64
%1837 = torch.prim.ListConstruct %int2_2012, %int-1_2013, %int20_2014, %int64_2015 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1838 = torch.aten.view %1822, %1837 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2016 = torch.constant.int 1
%int2_2017 = torch.constant.int 2
%1839 = torch.aten.transpose.int %1838, %int1_2016, %int2_2017 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_2018 = torch.constant.int 2
%int-1_2019 = torch.constant.int -1
%int20_2020 = torch.constant.int 20
%int64_2021 = torch.constant.int 64
%1840 = torch.prim.ListConstruct %int2_2018, %int-1_2019, %int20_2020, %int64_2021 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1841 = torch.aten.view %1829, %1840 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16>
%int1_2022 = torch.constant.int 1
%int2_2023 = torch.constant.int 2
%1842 = torch.aten.transpose.int %1841, %int1_2022, %int2_2023 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16>
%int2_2024 = torch.constant.int 2
%int-1_2025 = torch.constant.int -1
%int20_2026 = torch.constant.int 20
%int64_2027 = torch.constant.int 64
%1843 = torch.prim.ListConstruct %int2_2024, %int-1_2025, %int20_2026, %int64_2027 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1844 = torch.aten.view %1836, %1843 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16>
%int1_2028 = torch.constant.int 1
%int2_2029 = torch.constant.int 2
%1845 = torch.aten.transpose.int %1844, %int1_2028, %int2_2029 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16>
%float0.000000e00_2030 = torch.constant.float 0.000000e+00
%false_2031 = torch.constant.bool false
%none_2032 = torch.constant.none
%none_2033 = torch.constant.none
%1846:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%1839, %1842, %1845, %float0.000000e00_2030, %false_2031, %none_2032, %none_2033) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>)
%1847 = torch.aten.detach %1846#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16>
%int1_2034 = torch.constant.int 1
%int2_2035 = torch.constant.int 2
%1848 = torch.aten.transpose.int %1846#0, %int1_2034, %int2_2035 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16>
%int2_2036 = torch.constant.int 2
%int-1_2037 = torch.constant.int -1
%int1280_2038 = torch.constant.int 1280
%1849 = torch.prim.ListConstruct %int2_2036, %int-1_2037, %int1280_2038 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1850 = torch.aten.view %1848, %1849 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%int2048_2039 = torch.constant.int 2048
%int1280_2040 = torch.constant.int 1280
%1851 = torch.prim.ListConstruct %int2048_2039, %int1280_2040 : (!torch.int, !torch.int) -> !torch.list<int>
%1852 = torch.aten.view %1850, %1851 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
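// Cross-attention output projection (attn2.to_out.0), inactive dropout, and residual add.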
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight : tensor<1280x1280xf16>
%1853 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2041 = torch.constant.int 0
%int1_2042 = torch.constant.int 1
%1854 = torch.aten.transpose.int %1853, %int0_2041, %int1_2042 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias : tensor<1280xf16>
%1855 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int6_2043 = torch.constant.int 6
%1856 = torch.prims.convert_element_type %1855, %int6_2043 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
%int6_2044 = torch.constant.int 6
%1857 = torch.prims.convert_element_type %1852, %int6_2044 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int6_2045 = torch.constant.int 6
%1858 = torch.prims.convert_element_type %1854, %int6_2045 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32>
%1859 = torch.aten.mm %1857, %1858 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32>
%int1_2046 = torch.constant.int 1
%1860 = torch.aten.mul.Scalar %1859, %int1_2046 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int1_2047 = torch.constant.int 1
%1861 = torch.aten.mul.Scalar %1856, %int1_2047 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_2048 = torch.constant.int 1
%1862 = torch.aten.add.Tensor %1860, %1861, %int1_2048 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int5_2049 = torch.constant.int 5
%1863 = torch.prims.convert_element_type %1862, %int5_2049 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int2_2050 = torch.constant.int 2
%int1024_2051 = torch.constant.int 1024
%int1280_2052 = torch.constant.int 1280
%1864 = torch.prim.ListConstruct %int2_2050, %int1024_2051, %int1280_2052 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1865 = torch.aten.view %1863, %1864 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%none_2053 = torch.constant.none
%1866 = torch.aten.clone %1865, %none_2053 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16>
%float1.000000e00_2054 = torch.constant.float 1.000000e+00
%1867 = torch.aten.div.Scalar %1866, %float1.000000e00_2054 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16>
%int1_2055 = torch.constant.int 1
%1868 = torch.aten.add.Tensor %1867, %1802, %int1_2055 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
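// LayerNorm (norm3) ahead of the feed-forward.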
%int6_2056 = torch.constant.int 6
%1869 = torch.prims.convert_element_type %1868, %int6_2056 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_2057 = torch.constant.int 2
%1870 = torch.prim.ListConstruct %int2_2057 : (!torch.int) -> !torch.list<int>
%int0_2058 = torch.constant.int 0
%true_2059 = torch.constant.bool true
%result0_2060, %result1_2061 = torch.aten.var_mean.correction %1869, %1870, %int0_2058, %true_2059 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_2062 = torch.constant.float 1.000000e-05
%int1_2063 = torch.constant.int 1
%1871 = torch.aten.add.Scalar %result0_2060, %float1.000000e-05_2062, %int1_2063 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%1872 = torch.aten.rsqrt %1871 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_2064 = torch.constant.int 1
%1873 = torch.aten.sub.Tensor %1868, %result1_2061, %int1_2064 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%1874 = torch.aten.mul.Tensor %1873, %1872 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.weight : tensor<1280xf16>
%1875 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%1876 = torch.aten.mul.Tensor %1874, %1875 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.bias : tensor<1280xf16>
%1877 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.norm3.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_2065 = torch.constant.int 1
%1878 = torch.aten.add.Tensor %1876, %1877, %int1_2065 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_2066 = torch.constant.int 5
%1879 = torch.prims.convert_element_type %1878, %int5_2066 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_2067 = torch.constant.int 5
%1880 = torch.prims.convert_element_type %result1_2061, %int5_2067 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_2068 = torch.constant.int 5
%1881 = torch.prims.convert_element_type %1872, %int5_2068 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
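// Feed-forward (ff.net.0.proj): up-projection to 10240 followed by the same GEGLU-style split/gelu/gate down to 5120.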
%int2048_2069 = torch.constant.int 2048
%int1280_2070 = torch.constant.int 1280
%1882 = torch.prim.ListConstruct %int2048_2069, %int1280_2070 : (!torch.int, !torch.int) -> !torch.list<int>
%1883 = torch.aten.view %1879, %1882 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight : tensor<10240x1280xf16>
%1884 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight : tensor<10240x1280xf16> -> !torch.vtensor<[10240,1280],f16>
%int0_2071 = torch.constant.int 0
%int1_2072 = torch.constant.int 1
%1885 = torch.aten.transpose.int %1884, %int0_2071, %int1_2072 : !torch.vtensor<[10240,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,10240],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias : tensor<10240xf16>
%1886 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias : tensor<10240xf16> -> !torch.vtensor<[10240],f16>
%int6_2073 = torch.constant.int 6
%1887 = torch.prims.convert_element_type %1886, %int6_2073 : !torch.vtensor<[10240],f16>, !torch.int -> !torch.vtensor<[10240],f32>
%int6_2074 = torch.constant.int 6
%1888 = torch.prims.convert_element_type %1883, %int6_2074 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int6_2075 = torch.constant.int 6
%1889 = torch.prims.convert_element_type %1885, %int6_2075 : !torch.vtensor<[1280,10240],f16>, !torch.int -> !torch.vtensor<[1280,10240],f32>
%1890 = torch.aten.mm %1888, %1889 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,10240],f32> -> !torch.vtensor<[2048,10240],f32>
%int1_2076 = torch.constant.int 1
%1891 = torch.aten.mul.Scalar %1890, %int1_2076 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32>
%int1_2077 = torch.constant.int 1
%1892 = torch.aten.mul.Scalar %1887, %int1_2077 : !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[10240],f32>
%int1_2078 = torch.constant.int 1
%1893 = torch.aten.add.Tensor %1891, %1892, %int1_2078 : !torch.vtensor<[2048,10240],f32>, !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32>
%int5_2079 = torch.constant.int 5
%1894 = torch.prims.convert_element_type %1893, %int5_2079 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f16>
%int2_2080 = torch.constant.int 2
%int1024_2081 = torch.constant.int 1024
%int10240_2082 = torch.constant.int 10240
%1895 = torch.prim.ListConstruct %int2_2080, %int1024_2081, %int10240_2082 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1896 = torch.aten.view %1894, %1895 : !torch.vtensor<[2048,10240],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,10240],f16>
%int-1_2083 = torch.constant.int -1
%int0_2084 = torch.constant.int 0
%int5120_2085 = torch.constant.int 5120
%int1_2086 = torch.constant.int 1
%1897 = torch.aten.slice.Tensor %1896, %int-1_2083, %int0_2084, %int5120_2085, %int1_2086 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16>
%int-1_2087 = torch.constant.int -1
%int5120_2088 = torch.constant.int 5120
%int10240_2089 = torch.constant.int 10240
%int1_2090 = torch.constant.int 1
%1898 = torch.aten.slice.Tensor %1896, %int-1_2087, %int5120_2088, %int10240_2089, %int1_2090 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16>
%str_2091 = torch.constant.str "none"
%1899 = torch.aten.gelu %1898, %str_2091 : !torch.vtensor<[2,1024,5120],f16>, !torch.str -> !torch.vtensor<[2,1024,5120],f16>
%1900 = torch.aten.mul.Tensor %1897, %1899 : !torch.vtensor<[2,1024,5120],f16>, !torch.vtensor<[2,1024,5120],f16> -> !torch.vtensor<[2,1024,5120],f16>
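// Feed-forward down-projection (ff.net.2) and residual add, closing transformer_blocks.1.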
%none_2092 = torch.constant.none
%1901 = torch.aten.clone %1900, %none_2092 : !torch.vtensor<[2,1024,5120],f16>, !torch.none -> !torch.vtensor<[2,1024,5120],f16>
%int2048_2093 = torch.constant.int 2048
%int5120_2094 = torch.constant.int 5120
%1902 = torch.prim.ListConstruct %int2048_2093, %int5120_2094 : (!torch.int, !torch.int) -> !torch.list<int>
%1903 = torch.aten.view %1901, %1902 : !torch.vtensor<[2,1024,5120],f16>, !torch.list<int> -> !torch.vtensor<[2048,5120],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight : tensor<1280x5120xf16>
%1904 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight : tensor<1280x5120xf16> -> !torch.vtensor<[1280,5120],f16>
%int0_2095 = torch.constant.int 0
%int1_2096 = torch.constant.int 1
%1905 = torch.aten.transpose.int %1904, %int0_2095, %int1_2096 : !torch.vtensor<[1280,5120],f16>, !torch.int, !torch.int -> !torch.vtensor<[5120,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias : tensor<1280xf16>
%1906 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int6_2097 = torch.constant.int 6
%1907 = torch.prims.convert_element_type %1906, %int6_2097 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
%int6_2098 = torch.constant.int 6
%1908 = torch.prims.convert_element_type %1903, %int6_2098 : !torch.vtensor<[2048,5120],f16>, !torch.int -> !torch.vtensor<[2048,5120],f32>
%int6_2099 = torch.constant.int 6
%1909 = torch.prims.convert_element_type %1905, %int6_2099 : !torch.vtensor<[5120,1280],f16>, !torch.int -> !torch.vtensor<[5120,1280],f32>
%1910 = torch.aten.mm %1908, %1909 : !torch.vtensor<[2048,5120],f32>, !torch.vtensor<[5120,1280],f32> -> !torch.vtensor<[2048,1280],f32>
%int1_2100 = torch.constant.int 1
%1911 = torch.aten.mul.Scalar %1910, %int1_2100 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int1_2101 = torch.constant.int 1
%1912 = torch.aten.mul.Scalar %1907, %int1_2101 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_2102 = torch.constant.int 1
%1913 = torch.aten.add.Tensor %1911, %1912, %int1_2102 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int5_2103 = torch.constant.int 5
%1914 = torch.prims.convert_element_type %1913, %int5_2103 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int2_2104 = torch.constant.int 2
%int1024_2105 = torch.constant.int 1024
%int1280_2106 = torch.constant.int 1280
%1915 = torch.prim.ListConstruct %int2_2104, %int1024_2105, %int1280_2106 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1916 = torch.aten.view %1914, %1915 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%int1_2107 = torch.constant.int 1
%1917 = torch.aten.add.Tensor %1916, %1868, %int1_2107 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
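// ---- transformer_blocks.2 ---- LayerNorm (norm1) ahead of self-attention.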
%int6_2108 = torch.constant.int 6
%1918 = torch.prims.convert_element_type %1917, %int6_2108 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_2109 = torch.constant.int 2
%1919 = torch.prim.ListConstruct %int2_2109 : (!torch.int) -> !torch.list<int>
%int0_2110 = torch.constant.int 0
%true_2111 = torch.constant.bool true
%result0_2112, %result1_2113 = torch.aten.var_mean.correction %1918, %1919, %int0_2110, %true_2111 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_2114 = torch.constant.float 1.000000e-05
%int1_2115 = torch.constant.int 1
%1920 = torch.aten.add.Scalar %result0_2112, %float1.000000e-05_2114, %int1_2115 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%1921 = torch.aten.rsqrt %1920 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_2116 = torch.constant.int 1
%1922 = torch.aten.sub.Tensor %1917, %result1_2113, %int1_2116 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%1923 = torch.aten.mul.Tensor %1922, %1921 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.weight : tensor<1280xf16>
%1924 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%1925 = torch.aten.mul.Tensor %1923, %1924 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.bias : tensor<1280xf16>
%1926 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_2117 = torch.constant.int 1
%1927 = torch.aten.add.Tensor %1925, %1926, %int1_2117 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_2118 = torch.constant.int 5
%1928 = torch.prims.convert_element_type %1927, %int5_2118 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_2119 = torch.constant.int 5
%1929 = torch.prims.convert_element_type %result1_2113, %int5_2119 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_2120 = torch.constant.int 5
%1930 = torch.prims.convert_element_type %1921, %int5_2120 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
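// Self-attention (attn1) Q/K/V projections, same shapes as in transformer_blocks.1.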
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight : tensor<1280x1280xf16>
%1931 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2121 = torch.constant.int 0
%int1_2122 = torch.constant.int 1
%1932 = torch.aten.transpose.int %1931, %int0_2121, %int1_2122 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2123 = torch.constant.int 2048
%int1280_2124 = torch.constant.int 1280
%1933 = torch.prim.ListConstruct %int2048_2123, %int1280_2124 : (!torch.int, !torch.int) -> !torch.list<int>
%1934 = torch.aten.view %1928, %1933 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%1935 = torch.aten.mm %1934, %1932 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2125 = torch.constant.int 2
%int1024_2126 = torch.constant.int 1024
%int1280_2127 = torch.constant.int 1280
%1936 = torch.prim.ListConstruct %int2_2125, %int1024_2126, %int1280_2127 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1937 = torch.aten.view %1935, %1936 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight : tensor<1280x1280xf16>
%1938 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_k.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2128 = torch.constant.int 0
%int1_2129 = torch.constant.int 1
%1939 = torch.aten.transpose.int %1938, %int0_2128, %int1_2129 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2130 = torch.constant.int 2048
%int1280_2131 = torch.constant.int 1280
%1940 = torch.prim.ListConstruct %int2048_2130, %int1280_2131 : (!torch.int, !torch.int) -> !torch.list<int>
%1941 = torch.aten.view %1928, %1940 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%1942 = torch.aten.mm %1941, %1939 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2132 = torch.constant.int 2
%int1024_2133 = torch.constant.int 1024
%int1280_2134 = torch.constant.int 1280
%1943 = torch.prim.ListConstruct %int2_2132, %int1024_2133, %int1280_2134 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1944 = torch.aten.view %1942, %1943 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight : tensor<1280x1280xf16>
%1945 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_v.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2135 = torch.constant.int 0
%int1_2136 = torch.constant.int 1
%1946 = torch.aten.transpose.int %1945, %int0_2135, %int1_2136 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2137 = torch.constant.int 2048
%int1280_2138 = torch.constant.int 1280
%1947 = torch.prim.ListConstruct %int2048_2137, %int1280_2138 : (!torch.int, !torch.int) -> !torch.list<int>
%1948 = torch.aten.view %1928, %1947 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%1949 = torch.aten.mm %1948, %1946 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2139 = torch.constant.int 2
%int1024_2140 = torch.constant.int 1024
%int1280_2141 = torch.constant.int 1280
%1950 = torch.prim.ListConstruct %int2_2139, %int1024_2140, %int1280_2141 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1951 = torch.aten.view %1949, %1950 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
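// attn1 (self-attention) Q, K, V above are each [2,1024,1280]; below they are split into
// 20 heads of dim 64 ([2,1024,20,64]) and transposed into [batch, heads, seq, dim] layout.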
%int2_2142 = torch.constant.int 2
%int-1_2143 = torch.constant.int -1
%int20_2144 = torch.constant.int 20
%int64_2145 = torch.constant.int 64
%1952 = torch.prim.ListConstruct %int2_2142, %int-1_2143, %int20_2144, %int64_2145 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1953 = torch.aten.view %1937, %1952 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2146 = torch.constant.int 1
%int2_2147 = torch.constant.int 2
%1954 = torch.aten.transpose.int %1953, %int1_2146, %int2_2147 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_2148 = torch.constant.int 2
%int-1_2149 = torch.constant.int -1
%int20_2150 = torch.constant.int 20
%int64_2151 = torch.constant.int 64
%1955 = torch.prim.ListConstruct %int2_2148, %int-1_2149, %int20_2150, %int64_2151 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1956 = torch.aten.view %1944, %1955 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2152 = torch.constant.int 1
%int2_2153 = torch.constant.int 2
%1957 = torch.aten.transpose.int %1956, %int1_2152, %int2_2153 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_2154 = torch.constant.int 2
%int-1_2155 = torch.constant.int -1
%int20_2156 = torch.constant.int 20
%int64_2157 = torch.constant.int 64
%1958 = torch.prim.ListConstruct %int2_2154, %int-1_2155, %int20_2156, %int64_2157 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1959 = torch.aten.view %1951, %1958 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2158 = torch.constant.int 1
%int2_2159 = torch.constant.int 2
%1960 = torch.aten.transpose.int %1959, %int1_2158, %int2_2159 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
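// Scaled dot-product attention over the 20 heads follows (CPU flash-attention variant); the second
// result is the per-row f32 logsumexp, and the detached copy of the output (%1962) is otherwise unused here.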
%float0.000000e00_2160 = torch.constant.float 0.000000e+00
%false_2161 = torch.constant.bool false
%none_2162 = torch.constant.none
%none_2163 = torch.constant.none
%1961:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%1954, %1957, %1960, %float0.000000e00_2160, %false_2161, %none_2162, %none_2163) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>)
%1962 = torch.aten.detach %1961#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16>
%int1_2164 = torch.constant.int 1
%int2_2165 = torch.constant.int 2
%1963 = torch.aten.transpose.int %1961#0, %int1_2164, %int2_2165 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16>
%int2_2166 = torch.constant.int 2
%int-1_2167 = torch.constant.int -1
%int1280_2168 = torch.constant.int 1280
%1964 = torch.prim.ListConstruct %int2_2166, %int-1_2167, %int1280_2168 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1965 = torch.aten.view %1963, %1964 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%int2048_2169 = torch.constant.int 2048
%int1280_2170 = torch.constant.int 1280
%1966 = torch.prim.ListConstruct %int2048_2169, %int1280_2170 : (!torch.int, !torch.int) -> !torch.list<int>
%1967 = torch.aten.view %1965, %1966 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
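// attn1 output projection (to_out.0): operands are upcast to f32 for the matmul + bias add and
// cast back to f16; the mul.Scalar-by-1 ops are no-op alpha/beta scales, likely left by an addmm decomposition.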
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight : tensor<1280x1280xf16>
%1968 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2171 = torch.constant.int 0
%int1_2172 = torch.constant.int 1
%1969 = torch.aten.transpose.int %1968, %int0_2171, %int1_2172 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias : tensor<1280xf16>
%1970 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int6_2173 = torch.constant.int 6
%1971 = torch.prims.convert_element_type %1970, %int6_2173 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
%int6_2174 = torch.constant.int 6
%1972 = torch.prims.convert_element_type %1967, %int6_2174 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int6_2175 = torch.constant.int 6
%1973 = torch.prims.convert_element_type %1969, %int6_2175 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32>
%1974 = torch.aten.mm %1972, %1973 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32>
%int1_2176 = torch.constant.int 1
%1975 = torch.aten.mul.Scalar %1974, %int1_2176 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int1_2177 = torch.constant.int 1
%1976 = torch.aten.mul.Scalar %1971, %int1_2177 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_2178 = torch.constant.int 1
%1977 = torch.aten.add.Tensor %1975, %1976, %int1_2178 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int5_2179 = torch.constant.int 5
%1978 = torch.prims.convert_element_type %1977, %int5_2179 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int2_2180 = torch.constant.int 2
%int1024_2181 = torch.constant.int 1024
%int1280_2182 = torch.constant.int 1280
%1979 = torch.prim.ListConstruct %int2_2180, %int1024_2181, %int1280_2182 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%1980 = torch.aten.view %1978, %1979 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%none_2183 = torch.constant.none
%1981 = torch.aten.clone %1980, %none_2183 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16>
%float1.000000e00_2184 = torch.constant.float 1.000000e+00
%1982 = torch.aten.div.Scalar %1981, %float1.000000e00_2184 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16>
%int1_2185 = torch.constant.int 1
%1983 = torch.aten.add.Tensor %1982, %1917, %int1_2185 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
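// %1983 adds the attention output back onto the residual stream (%1917); next comes LayerNorm
// norm2, computed explicitly as var_mean -> add eps -> rsqrt -> scale/shift in f32.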
%int6_2186 = torch.constant.int 6
%1984 = torch.prims.convert_element_type %1983, %int6_2186 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_2187 = torch.constant.int 2
%1985 = torch.prim.ListConstruct %int2_2187 : (!torch.int) -> !torch.list<int>
%int0_2188 = torch.constant.int 0
%true_2189 = torch.constant.bool true
%result0_2190, %result1_2191 = torch.aten.var_mean.correction %1984, %1985, %int0_2188, %true_2189 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_2192 = torch.constant.float 1.000000e-05
%int1_2193 = torch.constant.int 1
%1986 = torch.aten.add.Scalar %result0_2190, %float1.000000e-05_2192, %int1_2193 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%1987 = torch.aten.rsqrt %1986 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_2194 = torch.constant.int 1
%1988 = torch.aten.sub.Tensor %1983, %result1_2191, %int1_2194 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%1989 = torch.aten.mul.Tensor %1988, %1987 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.weight : tensor<1280xf16>
%1990 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%1991 = torch.aten.mul.Tensor %1989, %1990 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.bias : tensor<1280xf16>
%1992 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_2195 = torch.constant.int 1
%1993 = torch.aten.add.Tensor %1991, %1992, %int1_2195 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_2196 = torch.constant.int 5
%1994 = torch.prims.convert_element_type %1993, %int5_2196 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_2197 = torch.constant.int 5
%1995 = torch.prims.convert_element_type %result1_2191, %int5_2197 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_2198 = torch.constant.int 5
%1996 = torch.prims.convert_element_type %1987, %int5_2198 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
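// attn2 (cross-attention): Q comes from the normalized hidden states, while K and V are projected
// from %arg1, the [2,64,2048] encoder hidden states serving as the cross-attention context.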
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight : tensor<1280x1280xf16>
%1997 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2199 = torch.constant.int 0
%int1_2200 = torch.constant.int 1
%1998 = torch.aten.transpose.int %1997, %int0_2199, %int1_2200 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2201 = torch.constant.int 2048
%int1280_2202 = torch.constant.int 1280
%1999 = torch.prim.ListConstruct %int2048_2201, %int1280_2202 : (!torch.int, !torch.int) -> !torch.list<int>
%2000 = torch.aten.view %1994, %1999 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%2001 = torch.aten.mm %2000, %1998 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2203 = torch.constant.int 2
%int1024_2204 = torch.constant.int 1024
%int1280_2205 = torch.constant.int 1280
%2002 = torch.prim.ListConstruct %int2_2203, %int1024_2204, %int1280_2205 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2003 = torch.aten.view %2001, %2002 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight : tensor<1280x2048xf16>
%2004 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_k.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16>
%int0_2206 = torch.constant.int 0
%int1_2207 = torch.constant.int 1
%2005 = torch.aten.transpose.int %2004, %int0_2206, %int1_2207 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int128_2208 = torch.constant.int 128
%int2048_2209 = torch.constant.int 2048
%2006 = torch.prim.ListConstruct %int128_2208, %int2048_2209 : (!torch.int, !torch.int) -> !torch.list<int>
%2007 = torch.aten.view %arg1, %2006 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16>
%2008 = torch.aten.mm %2007, %2005 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16>
%int2_2210 = torch.constant.int 2
%int64_2211 = torch.constant.int 64
%int1280_2212 = torch.constant.int 1280
%2009 = torch.prim.ListConstruct %int2_2210, %int64_2211, %int1280_2212 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2010 = torch.aten.view %2008, %2009 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight : tensor<1280x2048xf16>
%2011 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_v.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16>
%int0_2213 = torch.constant.int 0
%int1_2214 = torch.constant.int 1
%2012 = torch.aten.transpose.int %2011, %int0_2213, %int1_2214 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int128_2215 = torch.constant.int 128
%int2048_2216 = torch.constant.int 2048
%2013 = torch.prim.ListConstruct %int128_2215, %int2048_2216 : (!torch.int, !torch.int) -> !torch.list<int>
%2014 = torch.aten.view %arg1, %2013 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16>
%2015 = torch.aten.mm %2014, %2012 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16>
%int2_2217 = torch.constant.int 2
%int64_2218 = torch.constant.int 64
%int1280_2219 = torch.constant.int 1280
%2016 = torch.prim.ListConstruct %int2_2217, %int64_2218, %int1280_2219 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2017 = torch.aten.view %2015, %2016 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16>
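// Head split below: Q keeps the 1024 spatial tokens ([2,20,1024,64]) while K and V carry the
// 64 context tokens ([2,20,64,64]).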
%int2_2220 = torch.constant.int 2
%int-1_2221 = torch.constant.int -1
%int20_2222 = torch.constant.int 20
%int64_2223 = torch.constant.int 64
%2018 = torch.prim.ListConstruct %int2_2220, %int-1_2221, %int20_2222, %int64_2223 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2019 = torch.aten.view %2003, %2018 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2224 = torch.constant.int 1
%int2_2225 = torch.constant.int 2
%2020 = torch.aten.transpose.int %2019, %int1_2224, %int2_2225 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_2226 = torch.constant.int 2
%int-1_2227 = torch.constant.int -1
%int20_2228 = torch.constant.int 20
%int64_2229 = torch.constant.int 64
%2021 = torch.prim.ListConstruct %int2_2226, %int-1_2227, %int20_2228, %int64_2229 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2022 = torch.aten.view %2010, %2021 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16>
%int1_2230 = torch.constant.int 1
%int2_2231 = torch.constant.int 2
%2023 = torch.aten.transpose.int %2022, %int1_2230, %int2_2231 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16>
%int2_2232 = torch.constant.int 2
%int-1_2233 = torch.constant.int -1
%int20_2234 = torch.constant.int 20
%int64_2235 = torch.constant.int 64
%2024 = torch.prim.ListConstruct %int2_2232, %int-1_2233, %int20_2234, %int64_2235 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2025 = torch.aten.view %2017, %2024 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16>
%int1_2236 = torch.constant.int 1
%int2_2237 = torch.constant.int 2
%2026 = torch.aten.transpose.int %2025, %int1_2236, %int2_2237 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16>
%float0.000000e00_2238 = torch.constant.float 0.000000e+00
%false_2239 = torch.constant.bool false
%none_2240 = torch.constant.none
%none_2241 = torch.constant.none
%2027:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%2020, %2023, %2026, %float0.000000e00_2238, %false_2239, %none_2240, %none_2241) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>)
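// Cross-attention output is [2,20,1024,64]; it is merged back to [2,1024,1280] below.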
%2028 = torch.aten.detach %2027#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16>
%int1_2242 = torch.constant.int 1
%int2_2243 = torch.constant.int 2
%2029 = torch.aten.transpose.int %2027#0, %int1_2242, %int2_2243 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16>
%int2_2244 = torch.constant.int 2
%int-1_2245 = torch.constant.int -1
%int1280_2246 = torch.constant.int 1280
%2030 = torch.prim.ListConstruct %int2_2244, %int-1_2245, %int1280_2246 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2031 = torch.aten.view %2029, %2030 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%int2048_2247 = torch.constant.int 2048
%int1280_2248 = torch.constant.int 1280
%2032 = torch.prim.ListConstruct %int2048_2247, %int1280_2248 : (!torch.int, !torch.int) -> !torch.list<int>
%2033 = torch.aten.view %2031, %2032 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight : tensor<1280x1280xf16>
%2034 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2249 = torch.constant.int 0
%int1_2250 = torch.constant.int 1
%2035 = torch.aten.transpose.int %2034, %int0_2249, %int1_2250 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias : tensor<1280xf16>
%2036 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int6_2251 = torch.constant.int 6
%2037 = torch.prims.convert_element_type %2036, %int6_2251 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
%int6_2252 = torch.constant.int 6
%2038 = torch.prims.convert_element_type %2033, %int6_2252 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int6_2253 = torch.constant.int 6
%2039 = torch.prims.convert_element_type %2035, %int6_2253 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32>
%2040 = torch.aten.mm %2038, %2039 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32>
%int1_2254 = torch.constant.int 1
%2041 = torch.aten.mul.Scalar %2040, %int1_2254 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int1_2255 = torch.constant.int 1
%2042 = torch.aten.mul.Scalar %2037, %int1_2255 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_2256 = torch.constant.int 1
%2043 = torch.aten.add.Tensor %2041, %2042, %int1_2256 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int5_2257 = torch.constant.int 5
%2044 = torch.prims.convert_element_type %2043, %int5_2257 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int2_2258 = torch.constant.int 2
%int1024_2259 = torch.constant.int 1024
%int1280_2260 = torch.constant.int 1280
%2045 = torch.prim.ListConstruct %int2_2258, %int1024_2259, %int1280_2260 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2046 = torch.aten.view %2044, %2045 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
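// attn2 output projection done; a residual add with %1983 follows, then LayerNorm norm3.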
%none_2261 = torch.constant.none
%2047 = torch.aten.clone %2046, %none_2261 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16>
%float1.000000e00_2262 = torch.constant.float 1.000000e+00
%2048 = torch.aten.div.Scalar %2047, %float1.000000e00_2262 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16>
%int1_2263 = torch.constant.int 1
%2049 = torch.aten.add.Tensor %2048, %1983, %int1_2263 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int6_2264 = torch.constant.int 6
%2050 = torch.prims.convert_element_type %2049, %int6_2264 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_2265 = torch.constant.int 2
%2051 = torch.prim.ListConstruct %int2_2265 : (!torch.int) -> !torch.list<int>
%int0_2266 = torch.constant.int 0
%true_2267 = torch.constant.bool true
%result0_2268, %result1_2269 = torch.aten.var_mean.correction %2050, %2051, %int0_2266, %true_2267 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_2270 = torch.constant.float 1.000000e-05
%int1_2271 = torch.constant.int 1
%2052 = torch.aten.add.Scalar %result0_2268, %float1.000000e-05_2270, %int1_2271 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%2053 = torch.aten.rsqrt %2052 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_2272 = torch.constant.int 1
%2054 = torch.aten.sub.Tensor %2049, %result1_2269, %int1_2272 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%2055 = torch.aten.mul.Tensor %2054, %2053 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.weight : tensor<1280xf16>
%2056 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%2057 = torch.aten.mul.Tensor %2055, %2056 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.bias : tensor<1280xf16>
%2058 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.norm3.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_2273 = torch.constant.int 1
%2059 = torch.aten.add.Tensor %2057, %2058, %int1_2273 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_2274 = torch.constant.int 5
%2060 = torch.prims.convert_element_type %2059, %int5_2274 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_2275 = torch.constant.int 5
%2061 = torch.prims.convert_element_type %result1_2269, %int5_2275 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_2276 = torch.constant.int 5
%2062 = torch.prims.convert_element_type %2053, %int5_2276 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
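// Feed-forward (GEGLU): ff.net.0.proj expands 1280 -> 10240, the result is sliced into two
// 5120-wide halves, and the first half is gated by GELU of the second.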
%int2048_2277 = torch.constant.int 2048
%int1280_2278 = torch.constant.int 1280
%2063 = torch.prim.ListConstruct %int2048_2277, %int1280_2278 : (!torch.int, !torch.int) -> !torch.list<int>
%2064 = torch.aten.view %2060, %2063 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight : tensor<10240x1280xf16>
%2065 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight : tensor<10240x1280xf16> -> !torch.vtensor<[10240,1280],f16>
%int0_2279 = torch.constant.int 0
%int1_2280 = torch.constant.int 1
%2066 = torch.aten.transpose.int %2065, %int0_2279, %int1_2280 : !torch.vtensor<[10240,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,10240],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias : tensor<10240xf16>
%2067 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias : tensor<10240xf16> -> !torch.vtensor<[10240],f16>
%int6_2281 = torch.constant.int 6
%2068 = torch.prims.convert_element_type %2067, %int6_2281 : !torch.vtensor<[10240],f16>, !torch.int -> !torch.vtensor<[10240],f32>
%int6_2282 = torch.constant.int 6
%2069 = torch.prims.convert_element_type %2064, %int6_2282 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int6_2283 = torch.constant.int 6
%2070 = torch.prims.convert_element_type %2066, %int6_2283 : !torch.vtensor<[1280,10240],f16>, !torch.int -> !torch.vtensor<[1280,10240],f32>
%2071 = torch.aten.mm %2069, %2070 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,10240],f32> -> !torch.vtensor<[2048,10240],f32>
%int1_2284 = torch.constant.int 1
%2072 = torch.aten.mul.Scalar %2071, %int1_2284 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32>
%int1_2285 = torch.constant.int 1
%2073 = torch.aten.mul.Scalar %2068, %int1_2285 : !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[10240],f32>
%int1_2286 = torch.constant.int 1
%2074 = torch.aten.add.Tensor %2072, %2073, %int1_2286 : !torch.vtensor<[2048,10240],f32>, !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32>
%int5_2287 = torch.constant.int 5
%2075 = torch.prims.convert_element_type %2074, %int5_2287 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f16>
%int2_2288 = torch.constant.int 2
%int1024_2289 = torch.constant.int 1024
%int10240_2290 = torch.constant.int 10240
%2076 = torch.prim.ListConstruct %int2_2288, %int1024_2289, %int10240_2290 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2077 = torch.aten.view %2075, %2076 : !torch.vtensor<[2048,10240],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,10240],f16>
%int-1_2291 = torch.constant.int -1
%int0_2292 = torch.constant.int 0
%int5120_2293 = torch.constant.int 5120
%int1_2294 = torch.constant.int 1
%2078 = torch.aten.slice.Tensor %2077, %int-1_2291, %int0_2292, %int5120_2293, %int1_2294 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16>
%int-1_2295 = torch.constant.int -1
%int5120_2296 = torch.constant.int 5120
%int10240_2297 = torch.constant.int 10240
%int1_2298 = torch.constant.int 1
%2079 = torch.aten.slice.Tensor %2077, %int-1_2295, %int5120_2296, %int10240_2297, %int1_2298 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16>
%str_2299 = torch.constant.str "none"
%2080 = torch.aten.gelu %2079, %str_2299 : !torch.vtensor<[2,1024,5120],f16>, !torch.str -> !torch.vtensor<[2,1024,5120],f16>
%2081 = torch.aten.mul.Tensor %2078, %2080 : !torch.vtensor<[2,1024,5120],f16>, !torch.vtensor<[2,1024,5120],f16> -> !torch.vtensor<[2,1024,5120],f16>
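// The gated product is projected back 5120 -> 1280 by ff.net.2, again via an f32 matmul + bias.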
%none_2300 = torch.constant.none
%2082 = torch.aten.clone %2081, %none_2300 : !torch.vtensor<[2,1024,5120],f16>, !torch.none -> !torch.vtensor<[2,1024,5120],f16>
%int2048_2301 = torch.constant.int 2048
%int5120_2302 = torch.constant.int 5120
%2083 = torch.prim.ListConstruct %int2048_2301, %int5120_2302 : (!torch.int, !torch.int) -> !torch.list<int>
%2084 = torch.aten.view %2082, %2083 : !torch.vtensor<[2,1024,5120],f16>, !torch.list<int> -> !torch.vtensor<[2048,5120],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight : tensor<1280x5120xf16>
%2085 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight : tensor<1280x5120xf16> -> !torch.vtensor<[1280,5120],f16>
%int0_2303 = torch.constant.int 0
%int1_2304 = torch.constant.int 1
%2086 = torch.aten.transpose.int %2085, %int0_2303, %int1_2304 : !torch.vtensor<[1280,5120],f16>, !torch.int, !torch.int -> !torch.vtensor<[5120,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias : tensor<1280xf16>
%2087 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int6_2305 = torch.constant.int 6
%2088 = torch.prims.convert_element_type %2087, %int6_2305 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
%int6_2306 = torch.constant.int 6
%2089 = torch.prims.convert_element_type %2084, %int6_2306 : !torch.vtensor<[2048,5120],f16>, !torch.int -> !torch.vtensor<[2048,5120],f32>
%int6_2307 = torch.constant.int 6
%2090 = torch.prims.convert_element_type %2086, %int6_2307 : !torch.vtensor<[5120,1280],f16>, !torch.int -> !torch.vtensor<[5120,1280],f32>
%2091 = torch.aten.mm %2089, %2090 : !torch.vtensor<[2048,5120],f32>, !torch.vtensor<[5120,1280],f32> -> !torch.vtensor<[2048,1280],f32>
%int1_2308 = torch.constant.int 1
%2092 = torch.aten.mul.Scalar %2091, %int1_2308 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int1_2309 = torch.constant.int 1
%2093 = torch.aten.mul.Scalar %2088, %int1_2309 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_2310 = torch.constant.int 1
%2094 = torch.aten.add.Tensor %2092, %2093, %int1_2310 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int5_2311 = torch.constant.int 5
%2095 = torch.prims.convert_element_type %2094, %int5_2311 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int2_2312 = torch.constant.int 2
%int1024_2313 = torch.constant.int 1024
%int1280_2314 = torch.constant.int 1280
%2096 = torch.prim.ListConstruct %int2_2312, %int1024_2313, %int1280_2314 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2097 = torch.aten.view %2095, %2096 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%int1_2315 = torch.constant.int 1
%2098 = torch.aten.add.Tensor %2097, %2049, %int1_2315 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
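// %2098 closes transformer_blocks.2 (attn1 -> attn2 -> ff, each with a residual add); it is the
// input to transformer_blocks.3, whose norm1 LayerNorm follows.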
%int6_2316 = torch.constant.int 6
%2099 = torch.prims.convert_element_type %2098, %int6_2316 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_2317 = torch.constant.int 2
%2100 = torch.prim.ListConstruct %int2_2317 : (!torch.int) -> !torch.list<int>
%int0_2318 = torch.constant.int 0
%true_2319 = torch.constant.bool true
%result0_2320, %result1_2321 = torch.aten.var_mean.correction %2099, %2100, %int0_2318, %true_2319 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_2322 = torch.constant.float 1.000000e-05
%int1_2323 = torch.constant.int 1
%2101 = torch.aten.add.Scalar %result0_2320, %float1.000000e-05_2322, %int1_2323 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%2102 = torch.aten.rsqrt %2101 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_2324 = torch.constant.int 1
%2103 = torch.aten.sub.Tensor %2098, %result1_2321, %int1_2324 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%2104 = torch.aten.mul.Tensor %2103, %2102 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.weight : tensor<1280xf16>
%2105 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%2106 = torch.aten.mul.Tensor %2104, %2105 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.bias : tensor<1280xf16>
%2107 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_2325 = torch.constant.int 1
%2108 = torch.aten.add.Tensor %2106, %2107, %int1_2325 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_2326 = torch.constant.int 5
%2109 = torch.prims.convert_element_type %2108, %int5_2326 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_2327 = torch.constant.int 5
%2110 = torch.prims.convert_element_type %result1_2321, %int5_2327 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_2328 = torch.constant.int 5
%2111 = torch.prims.convert_element_type %2102, %int5_2328 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight : tensor<1280x1280xf16>
%2112 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2329 = torch.constant.int 0
%int1_2330 = torch.constant.int 1
%2113 = torch.aten.transpose.int %2112, %int0_2329, %int1_2330 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2331 = torch.constant.int 2048
%int1280_2332 = torch.constant.int 1280
%2114 = torch.prim.ListConstruct %int2048_2331, %int1280_2332 : (!torch.int, !torch.int) -> !torch.list<int>
%2115 = torch.aten.view %2109, %2114 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%2116 = torch.aten.mm %2115, %2113 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2333 = torch.constant.int 2
%int1024_2334 = torch.constant.int 1024
%int1280_2335 = torch.constant.int 1280
%2117 = torch.prim.ListConstruct %int2_2333, %int1024_2334, %int1280_2335 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2118 = torch.aten.view %2116, %2117 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight : tensor<1280x1280xf16>
%2119 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_k.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2336 = torch.constant.int 0
%int1_2337 = torch.constant.int 1
%2120 = torch.aten.transpose.int %2119, %int0_2336, %int1_2337 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2338 = torch.constant.int 2048
%int1280_2339 = torch.constant.int 1280
%2121 = torch.prim.ListConstruct %int2048_2338, %int1280_2339 : (!torch.int, !torch.int) -> !torch.list<int>
%2122 = torch.aten.view %2109, %2121 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%2123 = torch.aten.mm %2122, %2120 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2340 = torch.constant.int 2
%int1024_2341 = torch.constant.int 1024
%int1280_2342 = torch.constant.int 1280
%2124 = torch.prim.ListConstruct %int2_2340, %int1024_2341, %int1280_2342 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2125 = torch.aten.view %2123, %2124 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight : tensor<1280x1280xf16>
%2126 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_v.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2343 = torch.constant.int 0
%int1_2344 = torch.constant.int 1
%2127 = torch.aten.transpose.int %2126, %int0_2343, %int1_2344 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2345 = torch.constant.int 2048
%int1280_2346 = torch.constant.int 1280
%2128 = torch.prim.ListConstruct %int2048_2345, %int1280_2346 : (!torch.int, !torch.int) -> !torch.list<int>
%2129 = torch.aten.view %2109, %2128 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%2130 = torch.aten.mm %2129, %2127 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2347 = torch.constant.int 2
%int1024_2348 = torch.constant.int 1024
%int1280_2349 = torch.constant.int 1280
%2131 = torch.prim.ListConstruct %int2_2347, %int1024_2348, %int1280_2349 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2132 = torch.aten.view %2130, %2131 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
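// transformer_blocks.3.attn1 Q/K/V computed above; the same 20-head reshape/transpose pattern follows.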
%int2_2350 = torch.constant.int 2
%int-1_2351 = torch.constant.int -1
%int20_2352 = torch.constant.int 20
%int64_2353 = torch.constant.int 64
%2133 = torch.prim.ListConstruct %int2_2350, %int-1_2351, %int20_2352, %int64_2353 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2134 = torch.aten.view %2118, %2133 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2354 = torch.constant.int 1
%int2_2355 = torch.constant.int 2
%2135 = torch.aten.transpose.int %2134, %int1_2354, %int2_2355 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_2356 = torch.constant.int 2
%int-1_2357 = torch.constant.int -1
%int20_2358 = torch.constant.int 20
%int64_2359 = torch.constant.int 64
%2136 = torch.prim.ListConstruct %int2_2356, %int-1_2357, %int20_2358, %int64_2359 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2137 = torch.aten.view %2125, %2136 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2360 = torch.constant.int 1
%int2_2361 = torch.constant.int 2
%2138 = torch.aten.transpose.int %2137, %int1_2360, %int2_2361 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_2362 = torch.constant.int 2
%int-1_2363 = torch.constant.int -1
%int20_2364 = torch.constant.int 20
%int64_2365 = torch.constant.int 64
%2139 = torch.prim.ListConstruct %int2_2362, %int-1_2363, %int20_2364, %int64_2365 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2140 = torch.aten.view %2132, %2139 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2366 = torch.constant.int 1
%int2_2367 = torch.constant.int 2
%2141 = torch.aten.transpose.int %2140, %int1_2366, %int2_2367 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%float0.000000e00_2368 = torch.constant.float 0.000000e+00
%false_2369 = torch.constant.bool false
%none_2370 = torch.constant.none
%none_2371 = torch.constant.none
%2142:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%2135, %2138, %2141, %float0.000000e00_2368, %false_2369, %none_2370, %none_2371) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>)
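// Self-attention output for transformer_blocks.3.attn1 is reshaped back to [2,1024,1280] and projected below.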
%2143 = torch.aten.detach %2142#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16>
%int1_2372 = torch.constant.int 1
%int2_2373 = torch.constant.int 2
%2144 = torch.aten.transpose.int %2142#0, %int1_2372, %int2_2373 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16>
%int2_2374 = torch.constant.int 2
%int-1_2375 = torch.constant.int -1
%int1280_2376 = torch.constant.int 1280
%2145 = torch.prim.ListConstruct %int2_2374, %int-1_2375, %int1280_2376 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2146 = torch.aten.view %2144, %2145 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%int2048_2377 = torch.constant.int 2048
%int1280_2378 = torch.constant.int 1280
%2147 = torch.prim.ListConstruct %int2048_2377, %int1280_2378 : (!torch.int, !torch.int) -> !torch.list<int>
%2148 = torch.aten.view %2146, %2147 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight : tensor<1280x1280xf16>
%2149 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2379 = torch.constant.int 0
%int1_2380 = torch.constant.int 1
%2150 = torch.aten.transpose.int %2149, %int0_2379, %int1_2380 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias : tensor<1280xf16>
%2151 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int6_2381 = torch.constant.int 6
%2152 = torch.prims.convert_element_type %2151, %int6_2381 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
%int6_2382 = torch.constant.int 6
%2153 = torch.prims.convert_element_type %2148, %int6_2382 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int6_2383 = torch.constant.int 6
%2154 = torch.prims.convert_element_type %2150, %int6_2383 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32>
%2155 = torch.aten.mm %2153, %2154 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32>
%int1_2384 = torch.constant.int 1
%2156 = torch.aten.mul.Scalar %2155, %int1_2384 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int1_2385 = torch.constant.int 1
%2157 = torch.aten.mul.Scalar %2152, %int1_2385 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_2386 = torch.constant.int 1
%2158 = torch.aten.add.Tensor %2156, %2157, %int1_2386 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int5_2387 = torch.constant.int 5
%2159 = torch.prims.convert_element_type %2158, %int5_2387 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int2_2388 = torch.constant.int 2
%int1024_2389 = torch.constant.int 1024
%int1280_2390 = torch.constant.int 1280
%2160 = torch.prim.ListConstruct %int2_2388, %int1024_2389, %int1280_2390 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2161 = torch.aten.view %2159, %2160 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%none_2391 = torch.constant.none
%2162 = torch.aten.clone %2161, %none_2391 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16>
%float1.000000e00_2392 = torch.constant.float 1.000000e+00
%2163 = torch.aten.div.Scalar %2162, %float1.000000e00_2392 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16>
%int1_2393 = torch.constant.int 1
%2164 = torch.aten.add.Tensor %2163, %2098, %int1_2393 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
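// Residual add with %2098, then LayerNorm norm2 ahead of the cross-attention attn2.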
%int6_2394 = torch.constant.int 6
%2165 = torch.prims.convert_element_type %2164, %int6_2394 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_2395 = torch.constant.int 2
%2166 = torch.prim.ListConstruct %int2_2395 : (!torch.int) -> !torch.list<int>
%int0_2396 = torch.constant.int 0
%true_2397 = torch.constant.bool true
%result0_2398, %result1_2399 = torch.aten.var_mean.correction %2165, %2166, %int0_2396, %true_2397 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_2400 = torch.constant.float 1.000000e-05
%int1_2401 = torch.constant.int 1
%2167 = torch.aten.add.Scalar %result0_2398, %float1.000000e-05_2400, %int1_2401 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%2168 = torch.aten.rsqrt %2167 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_2402 = torch.constant.int 1
%2169 = torch.aten.sub.Tensor %2164, %result1_2399, %int1_2402 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%2170 = torch.aten.mul.Tensor %2169, %2168 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.weight : tensor<1280xf16>
%2171 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%2172 = torch.aten.mul.Tensor %2170, %2171 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.bias : tensor<1280xf16>
%2173 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_2403 = torch.constant.int 1
%2174 = torch.aten.add.Tensor %2172, %2173, %int1_2403 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_2404 = torch.constant.int 5
%2175 = torch.prims.convert_element_type %2174, %int5_2404 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_2405 = torch.constant.int 5
%2176 = torch.prims.convert_element_type %result1_2399, %int5_2405 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_2406 = torch.constant.int 5
%2177 = torch.prims.convert_element_type %2168, %int5_2406 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight : tensor<1280x1280xf16>
%2178 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2407 = torch.constant.int 0
%int1_2408 = torch.constant.int 1
%2179 = torch.aten.transpose.int %2178, %int0_2407, %int1_2408 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2409 = torch.constant.int 2048
%int1280_2410 = torch.constant.int 1280
%2180 = torch.prim.ListConstruct %int2048_2409, %int1280_2410 : (!torch.int, !torch.int) -> !torch.list<int>
%2181 = torch.aten.view %2175, %2180 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%2182 = torch.aten.mm %2181, %2179 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2411 = torch.constant.int 2
%int1024_2412 = torch.constant.int 1024
%int1280_2413 = torch.constant.int 1280
%2183 = torch.prim.ListConstruct %int2_2411, %int1024_2412, %int1280_2413 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2184 = torch.aten.view %2182, %2183 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight : tensor<1280x2048xf16>
%2185 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_k.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16>
%int0_2414 = torch.constant.int 0
%int1_2415 = torch.constant.int 1
%2186 = torch.aten.transpose.int %2185, %int0_2414, %int1_2415 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int128_2416 = torch.constant.int 128
%int2048_2417 = torch.constant.int 2048
%2187 = torch.prim.ListConstruct %int128_2416, %int2048_2417 : (!torch.int, !torch.int) -> !torch.list<int>
%2188 = torch.aten.view %arg1, %2187 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16>
%2189 = torch.aten.mm %2188, %2186 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16>
%int2_2418 = torch.constant.int 2
%int64_2419 = torch.constant.int 64
%int1280_2420 = torch.constant.int 1280
%2190 = torch.prim.ListConstruct %int2_2418, %int64_2419, %int1280_2420 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2191 = torch.aten.view %2189, %2190 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight : tensor<1280x2048xf16>
%2192 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_v.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16>
%int0_2421 = torch.constant.int 0
%int1_2422 = torch.constant.int 1
%2193 = torch.aten.transpose.int %2192, %int0_2421, %int1_2422 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int128_2423 = torch.constant.int 128
%int2048_2424 = torch.constant.int 2048
%2194 = torch.prim.ListConstruct %int128_2423, %int2048_2424 : (!torch.int, !torch.int) -> !torch.list<int>
%2195 = torch.aten.view %arg1, %2194 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16>
%2196 = torch.aten.mm %2195, %2193 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16>
%int2_2425 = torch.constant.int 2
%int64_2426 = torch.constant.int 64
%int1280_2427 = torch.constant.int 1280
%2197 = torch.prim.ListConstruct %int2_2425, %int64_2426, %int1280_2427 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2198 = torch.aten.view %2196, %2197 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16>
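// As in transformer_blocks.2.attn2: [2,1024,1280] queries attend over [2,64,1280] keys/values
// derived from the context tensor %arg1; the head split follows.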
%int2_2428 = torch.constant.int 2 | |
%int-1_2429 = torch.constant.int -1 | |
%int20_2430 = torch.constant.int 20 | |
%int64_2431 = torch.constant.int 64 | |
%2199 = torch.prim.ListConstruct %int2_2428, %int-1_2429, %int20_2430, %int64_2431 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2200 = torch.aten.view %2184, %2199 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16> | |
%int1_2432 = torch.constant.int 1 | |
%int2_2433 = torch.constant.int 2 | |
%2201 = torch.aten.transpose.int %2200, %int1_2432, %int2_2433 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16> | |
%int2_2434 = torch.constant.int 2 | |
%int-1_2435 = torch.constant.int -1 | |
%int20_2436 = torch.constant.int 20 | |
%int64_2437 = torch.constant.int 64 | |
%2202 = torch.prim.ListConstruct %int2_2434, %int-1_2435, %int20_2436, %int64_2437 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2203 = torch.aten.view %2191, %2202 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16> | |
%int1_2438 = torch.constant.int 1 | |
%int2_2439 = torch.constant.int 2 | |
%2204 = torch.aten.transpose.int %2203, %int1_2438, %int2_2439 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16> | |
%int2_2440 = torch.constant.int 2 | |
%int-1_2441 = torch.constant.int -1 | |
%int20_2442 = torch.constant.int 20 | |
%int64_2443 = torch.constant.int 64 | |
%2205 = torch.prim.ListConstruct %int2_2440, %int-1_2441, %int20_2442, %int64_2443 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2206 = torch.aten.view %2198, %2205 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16> | |
%int1_2444 = torch.constant.int 1 | |
%int2_2445 = torch.constant.int 2 | |
%2207 = torch.aten.transpose.int %2206, %int1_2444, %int2_2445 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16> | |
%float0.000000e00_2446 = torch.constant.float 0.000000e+00 | |
%false_2447 = torch.constant.bool false | |
%none_2448 = torch.constant.none | |
%none_2449 = torch.constant.none | |
%2208:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%2201, %2204, %2207, %float0.000000e00_2446, %false_2447, %none_2448, %none_2449) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>) | |
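// The CPU flash-attention op returns (attention output, logsumexp [2,20,1024] f32).
// With dropout_p = 0.0 and is_causal = false this is plain
// softmax(q k^T / sqrt(64)) v over the 64 context tokens; the logsumexp result
// appears unused in this block (only the first result is detached and reshaped).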
%2209 = torch.aten.detach %2208#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16> | |
%int1_2450 = torch.constant.int 1 | |
%int2_2451 = torch.constant.int 2 | |
%2210 = torch.aten.transpose.int %2208#0, %int1_2450, %int2_2451 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16> | |
%int2_2452 = torch.constant.int 2 | |
%int-1_2453 = torch.constant.int -1 | |
%int1280_2454 = torch.constant.int 1280 | |
%2211 = torch.prim.ListConstruct %int2_2452, %int-1_2453, %int1280_2454 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2212 = torch.aten.view %2210, %2211 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%int2048_2455 = torch.constant.int 2048 | |
%int1280_2456 = torch.constant.int 1280 | |
%2213 = torch.prim.ListConstruct %int2048_2455, %int1280_2456 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2214 = torch.aten.view %2212, %2213 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight : tensor<1280x1280xf16> | |
%2215 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_2457 = torch.constant.int 0 | |
%int1_2458 = torch.constant.int 1 | |
%2216 = torch.aten.transpose.int %2215, %int0_2457, %int1_2458 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias : tensor<1280xf16> | |
%2217 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_2459 = torch.constant.int 6 | |
%2218 = torch.prims.convert_element_type %2217, %int6_2459 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_2460 = torch.constant.int 6 | |
%2219 = torch.prims.convert_element_type %2214, %int6_2460 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int6_2461 = torch.constant.int 6 | |
%2220 = torch.prims.convert_element_type %2216, %int6_2461 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32> | |
%2221 = torch.aten.mm %2219, %2220 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32> | |
%int1_2462 = torch.constant.int 1 | |
%2222 = torch.aten.mul.Scalar %2221, %int1_2462 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int1_2463 = torch.constant.int 1 | |
%2223 = torch.aten.mul.Scalar %2218, %int1_2463 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_2464 = torch.constant.int 1 | |
%2224 = torch.aten.add.Tensor %2222, %2223, %int1_2464 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int5_2465 = torch.constant.int 5 | |
%2225 = torch.prims.convert_element_type %2224, %int5_2465 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
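// to_out.0 runs in mixed precision: bias, input and weight are upcast to f32
// (dtype code 6), the matmul and bias add execute in f32, and the result is
// narrowed back to f16 (dtype code 5).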
%int2_2466 = torch.constant.int 2 | |
%int1024_2467 = torch.constant.int 1024 | |
%int1280_2468 = torch.constant.int 1280 | |
%2226 = torch.prim.ListConstruct %int2_2466, %int1024_2467, %int1280_2468 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2227 = torch.aten.view %2225, %2226 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%none_2469 = torch.constant.none | |
%2228 = torch.aten.clone %2227, %none_2469 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16> | |
%float1.000000e00_2470 = torch.constant.float 1.000000e+00 | |
%2229 = torch.aten.div.Scalar %2228, %float1.000000e00_2470 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16> | |
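// The mul.Scalar-by-1 pair above is the alpha/beta scaling left over from the addmm
// decomposition, and the div.Scalar by 1.0 is likely the attention output rescale
// factor (default 1.0); all three are identities that later folding passes can be
// expected to remove.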
%int1_2471 = torch.constant.int 1 | |
%2230 = torch.aten.add.Tensor %2229, %2164, %int1_2471 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int6_2472 = torch.constant.int 6 | |
%2231 = torch.prims.convert_element_type %2230, %int6_2472 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int2_2473 = torch.constant.int 2 | |
%2232 = torch.prim.ListConstruct %int2_2473 : (!torch.int) -> !torch.list<int> | |
%int0_2474 = torch.constant.int 0 | |
%true_2475 = torch.constant.bool true | |
%result0_2476, %result1_2477 = torch.aten.var_mean.correction %2231, %2232, %int0_2474, %true_2475 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32> | |
%float1.000000e-05_2478 = torch.constant.float 1.000000e-05 | |
%int1_2479 = torch.constant.int 1 | |
%2233 = torch.aten.add.Scalar %result0_2476, %float1.000000e-05_2478, %int1_2479 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32> | |
%2234 = torch.aten.rsqrt %2233 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32> | |
%int1_2480 = torch.constant.int 1 | |
%2235 = torch.aten.sub.Tensor %2230, %result1_2477, %int1_2480 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%2236 = torch.aten.mul.Tensor %2235, %2234 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.weight : tensor<1280xf16> | |
%2237 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%2238 = torch.aten.mul.Tensor %2236, %2237 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.bias : tensor<1280xf16> | |
%2239 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.norm3.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_2481 = torch.constant.int 1 | |
%2240 = torch.aten.add.Tensor %2238, %2239, %int1_2481 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int5_2482 = torch.constant.int 5 | |
%2241 = torch.prims.convert_element_type %2240, %int5_2482 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int5_2483 = torch.constant.int 5 | |
%2242 = torch.prims.convert_element_type %result1_2477, %int5_2483 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%int5_2484 = torch.constant.int 5 | |
%2243 = torch.prims.convert_element_type %2234, %int5_2484 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
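// Decomposed LayerNorm over the last (1280) dim: var_mean with correction 0 and
// keepdim, add eps 1e-5, rsqrt, then (x - mean) * rsqrt(var + eps) in f32, followed
// by the norm3 affine weight/bias and a cast back to f16. The trailing f16 casts of
// the mean and rsqrt appear to be unused leftovers of the decomposition.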
%int2048_2485 = torch.constant.int 2048 | |
%int1280_2486 = torch.constant.int 1280 | |
%2244 = torch.prim.ListConstruct %int2048_2485, %int1280_2486 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2245 = torch.aten.view %2241, %2244 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight : tensor<10240x1280xf16> | |
%2246 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight : tensor<10240x1280xf16> -> !torch.vtensor<[10240,1280],f16> | |
%int0_2487 = torch.constant.int 0 | |
%int1_2488 = torch.constant.int 1 | |
%2247 = torch.aten.transpose.int %2246, %int0_2487, %int1_2488 : !torch.vtensor<[10240,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,10240],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias : tensor<10240xf16> | |
%2248 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias : tensor<10240xf16> -> !torch.vtensor<[10240],f16> | |
%int6_2489 = torch.constant.int 6 | |
%2249 = torch.prims.convert_element_type %2248, %int6_2489 : !torch.vtensor<[10240],f16>, !torch.int -> !torch.vtensor<[10240],f32> | |
%int6_2490 = torch.constant.int 6 | |
%2250 = torch.prims.convert_element_type %2245, %int6_2490 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int6_2491 = torch.constant.int 6 | |
%2251 = torch.prims.convert_element_type %2247, %int6_2491 : !torch.vtensor<[1280,10240],f16>, !torch.int -> !torch.vtensor<[1280,10240],f32> | |
%2252 = torch.aten.mm %2250, %2251 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,10240],f32> -> !torch.vtensor<[2048,10240],f32> | |
%int1_2492 = torch.constant.int 1 | |
%2253 = torch.aten.mul.Scalar %2252, %int1_2492 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32> | |
%int1_2493 = torch.constant.int 1 | |
%2254 = torch.aten.mul.Scalar %2249, %int1_2493 : !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[10240],f32> | |
%int1_2494 = torch.constant.int 1 | |
%2255 = torch.aten.add.Tensor %2253, %2254, %int1_2494 : !torch.vtensor<[2048,10240],f32>, !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32> | |
%int5_2495 = torch.constant.int 5 | |
%2256 = torch.prims.convert_element_type %2255, %int5_2495 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f16> | |
%int2_2496 = torch.constant.int 2 | |
%int1024_2497 = torch.constant.int 1024 | |
%int10240_2498 = torch.constant.int 10240 | |
%2257 = torch.prim.ListConstruct %int2_2496, %int1024_2497, %int10240_2498 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2258 = torch.aten.view %2256, %2257 : !torch.vtensor<[2048,10240],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,10240],f16> | |
%int-1_2499 = torch.constant.int -1 | |
%int0_2500 = torch.constant.int 0 | |
%int5120_2501 = torch.constant.int 5120 | |
%int1_2502 = torch.constant.int 1 | |
%2259 = torch.aten.slice.Tensor %2258, %int-1_2499, %int0_2500, %int5120_2501, %int1_2502 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16> | |
%int-1_2503 = torch.constant.int -1 | |
%int5120_2504 = torch.constant.int 5120 | |
%int10240_2505 = torch.constant.int 10240 | |
%int1_2506 = torch.constant.int 1 | |
%2260 = torch.aten.slice.Tensor %2258, %int-1_2503, %int5120_2504, %int10240_2505, %int1_2506 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16> | |
%str_2507 = torch.constant.str "none" | |
%2261 = torch.aten.gelu %2260, %str_2507 : !torch.vtensor<[2,1024,5120],f16>, !torch.str -> !torch.vtensor<[2,1024,5120],f16> | |
%2262 = torch.aten.mul.Tensor %2259, %2261 : !torch.vtensor<[2,1024,5120],f16>, !torch.vtensor<[2,1024,5120],f16> -> !torch.vtensor<[2,1024,5120],f16> | |
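// GEGLU feed-forward: ff.net.0.proj expands 1280 -> 10240, the result is sliced into
// two 5120-wide halves, exact (approximate = "none") gelu is applied to the second
// half, and the halves are multiplied elementwise; ff.net.2 then projects 5120 -> 1280.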
%none_2508 = torch.constant.none | |
%2263 = torch.aten.clone %2262, %none_2508 : !torch.vtensor<[2,1024,5120],f16>, !torch.none -> !torch.vtensor<[2,1024,5120],f16> | |
%int2048_2509 = torch.constant.int 2048 | |
%int5120_2510 = torch.constant.int 5120 | |
%2264 = torch.prim.ListConstruct %int2048_2509, %int5120_2510 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2265 = torch.aten.view %2263, %2264 : !torch.vtensor<[2,1024,5120],f16>, !torch.list<int> -> !torch.vtensor<[2048,5120],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight : tensor<1280x5120xf16> | |
%2266 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight : tensor<1280x5120xf16> -> !torch.vtensor<[1280,5120],f16> | |
%int0_2511 = torch.constant.int 0 | |
%int1_2512 = torch.constant.int 1 | |
%2267 = torch.aten.transpose.int %2266, %int0_2511, %int1_2512 : !torch.vtensor<[1280,5120],f16>, !torch.int, !torch.int -> !torch.vtensor<[5120,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias : tensor<1280xf16> | |
%2268 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_2513 = torch.constant.int 6 | |
%2269 = torch.prims.convert_element_type %2268, %int6_2513 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_2514 = torch.constant.int 6 | |
%2270 = torch.prims.convert_element_type %2265, %int6_2514 : !torch.vtensor<[2048,5120],f16>, !torch.int -> !torch.vtensor<[2048,5120],f32> | |
%int6_2515 = torch.constant.int 6 | |
%2271 = torch.prims.convert_element_type %2267, %int6_2515 : !torch.vtensor<[5120,1280],f16>, !torch.int -> !torch.vtensor<[5120,1280],f32> | |
%2272 = torch.aten.mm %2270, %2271 : !torch.vtensor<[2048,5120],f32>, !torch.vtensor<[5120,1280],f32> -> !torch.vtensor<[2048,1280],f32> | |
%int1_2516 = torch.constant.int 1 | |
%2273 = torch.aten.mul.Scalar %2272, %int1_2516 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int1_2517 = torch.constant.int 1 | |
%2274 = torch.aten.mul.Scalar %2269, %int1_2517 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_2518 = torch.constant.int 1 | |
%2275 = torch.aten.add.Tensor %2273, %2274, %int1_2518 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int5_2519 = torch.constant.int 5 | |
%2276 = torch.prims.convert_element_type %2275, %int5_2519 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int2_2520 = torch.constant.int 2 | |
%int1024_2521 = torch.constant.int 1024 | |
%int1280_2522 = torch.constant.int 1280 | |
%2277 = torch.prim.ListConstruct %int2_2520, %int1024_2521, %int1280_2522 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2278 = torch.aten.view %2276, %2277 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%int1_2523 = torch.constant.int 1 | |
%2279 = torch.aten.add.Tensor %2278, %2230, %int1_2523 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
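// The residual add of the FF output closes transformer_blocks.3. transformer_blocks.4
// below repeats the same structure: norm1 + self-attention (attn1), norm2 +
// cross-attention (attn2) against %arg1, norm3 + GEGLU feed-forward, each followed
// by a residual connection.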
%int6_2524 = torch.constant.int 6 | |
%2280 = torch.prims.convert_element_type %2279, %int6_2524 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int2_2525 = torch.constant.int 2 | |
%2281 = torch.prim.ListConstruct %int2_2525 : (!torch.int) -> !torch.list<int> | |
%int0_2526 = torch.constant.int 0 | |
%true_2527 = torch.constant.bool true | |
%result0_2528, %result1_2529 = torch.aten.var_mean.correction %2280, %2281, %int0_2526, %true_2527 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32> | |
%float1.000000e-05_2530 = torch.constant.float 1.000000e-05 | |
%int1_2531 = torch.constant.int 1 | |
%2282 = torch.aten.add.Scalar %result0_2528, %float1.000000e-05_2530, %int1_2531 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32> | |
%2283 = torch.aten.rsqrt %2282 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32> | |
%int1_2532 = torch.constant.int 1 | |
%2284 = torch.aten.sub.Tensor %2279, %result1_2529, %int1_2532 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%2285 = torch.aten.mul.Tensor %2284, %2283 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.weight : tensor<1280xf16> | |
%2286 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%2287 = torch.aten.mul.Tensor %2285, %2286 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.bias : tensor<1280xf16> | |
%2288 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_2533 = torch.constant.int 1 | |
%2289 = torch.aten.add.Tensor %2287, %2288, %int1_2533 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int5_2534 = torch.constant.int 5 | |
%2290 = torch.prims.convert_element_type %2289, %int5_2534 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int5_2535 = torch.constant.int 5 | |
%2291 = torch.prims.convert_element_type %result1_2529, %int5_2535 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%int5_2536 = torch.constant.int 5 | |
%2292 = torch.prims.convert_element_type %2283, %int5_2536 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight : tensor<1280x1280xf16> | |
%2293 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_2537 = torch.constant.int 0 | |
%int1_2538 = torch.constant.int 1 | |
%2294 = torch.aten.transpose.int %2293, %int0_2537, %int1_2538 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%int2048_2539 = torch.constant.int 2048 | |
%int1280_2540 = torch.constant.int 1280 | |
%2295 = torch.prim.ListConstruct %int2048_2539, %int1280_2540 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2296 = torch.aten.view %2290, %2295 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%2297 = torch.aten.mm %2296, %2294 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16> | |
%int2_2541 = torch.constant.int 2 | |
%int1024_2542 = torch.constant.int 1024 | |
%int1280_2543 = torch.constant.int 1280 | |
%2298 = torch.prim.ListConstruct %int2_2541, %int1024_2542, %int1280_2543 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2299 = torch.aten.view %2297, %2298 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight : tensor<1280x1280xf16> | |
%2300 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_k.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_2544 = torch.constant.int 0 | |
%int1_2545 = torch.constant.int 1 | |
%2301 = torch.aten.transpose.int %2300, %int0_2544, %int1_2545 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%int2048_2546 = torch.constant.int 2048 | |
%int1280_2547 = torch.constant.int 1280 | |
%2302 = torch.prim.ListConstruct %int2048_2546, %int1280_2547 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2303 = torch.aten.view %2290, %2302 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%2304 = torch.aten.mm %2303, %2301 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16> | |
%int2_2548 = torch.constant.int 2 | |
%int1024_2549 = torch.constant.int 1024 | |
%int1280_2550 = torch.constant.int 1280 | |
%2305 = torch.prim.ListConstruct %int2_2548, %int1024_2549, %int1280_2550 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2306 = torch.aten.view %2304, %2305 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight : tensor<1280x1280xf16> | |
%2307 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_v.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_2551 = torch.constant.int 0 | |
%int1_2552 = torch.constant.int 1 | |
%2308 = torch.aten.transpose.int %2307, %int0_2551, %int1_2552 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%int2048_2553 = torch.constant.int 2048 | |
%int1280_2554 = torch.constant.int 1280 | |
%2309 = torch.prim.ListConstruct %int2048_2553, %int1280_2554 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2310 = torch.aten.view %2290, %2309 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%2311 = torch.aten.mm %2310, %2308 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16> | |
%int2_2555 = torch.constant.int 2 | |
%int1024_2556 = torch.constant.int 1024 | |
%int1280_2557 = torch.constant.int 1280 | |
%2312 = torch.prim.ListConstruct %int2_2555, %int1024_2556, %int1280_2557 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2313 = torch.aten.view %2311, %2312 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
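// attn1 is self-attention: q, k and v are all projected from the same normalized
// [2,1024,1280] hidden states (%2290), so k and v keep the full 1024-token sequence,
// unlike attn2 where they come from the 64-token context.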
%int2_2558 = torch.constant.int 2 | |
%int-1_2559 = torch.constant.int -1 | |
%int20_2560 = torch.constant.int 20 | |
%int64_2561 = torch.constant.int 64 | |
%2314 = torch.prim.ListConstruct %int2_2558, %int-1_2559, %int20_2560, %int64_2561 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2315 = torch.aten.view %2299, %2314 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16> | |
%int1_2562 = torch.constant.int 1 | |
%int2_2563 = torch.constant.int 2 | |
%2316 = torch.aten.transpose.int %2315, %int1_2562, %int2_2563 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16> | |
%int2_2564 = torch.constant.int 2 | |
%int-1_2565 = torch.constant.int -1 | |
%int20_2566 = torch.constant.int 20 | |
%int64_2567 = torch.constant.int 64 | |
%2317 = torch.prim.ListConstruct %int2_2564, %int-1_2565, %int20_2566, %int64_2567 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2318 = torch.aten.view %2306, %2317 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16> | |
%int1_2568 = torch.constant.int 1 | |
%int2_2569 = torch.constant.int 2 | |
%2319 = torch.aten.transpose.int %2318, %int1_2568, %int2_2569 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16> | |
%int2_2570 = torch.constant.int 2 | |
%int-1_2571 = torch.constant.int -1 | |
%int20_2572 = torch.constant.int 20 | |
%int64_2573 = torch.constant.int 64 | |
%2320 = torch.prim.ListConstruct %int2_2570, %int-1_2571, %int20_2572, %int64_2573 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2321 = torch.aten.view %2313, %2320 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16> | |
%int1_2574 = torch.constant.int 1 | |
%int2_2575 = torch.constant.int 2 | |
%2322 = torch.aten.transpose.int %2321, %int1_2574, %int2_2575 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16> | |
%float0.000000e00_2576 = torch.constant.float 0.000000e+00 | |
%false_2577 = torch.constant.bool false | |
%none_2578 = torch.constant.none | |
%none_2579 = torch.constant.none | |
%2323:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%2316, %2319, %2322, %float0.000000e00_2576, %false_2577, %none_2578, %none_2579) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>) | |
%2324 = torch.aten.detach %2323#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16> | |
%int1_2580 = torch.constant.int 1 | |
%int2_2581 = torch.constant.int 2 | |
%2325 = torch.aten.transpose.int %2323#0, %int1_2580, %int2_2581 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16> | |
%int2_2582 = torch.constant.int 2 | |
%int-1_2583 = torch.constant.int -1 | |
%int1280_2584 = torch.constant.int 1280 | |
%2326 = torch.prim.ListConstruct %int2_2582, %int-1_2583, %int1280_2584 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2327 = torch.aten.view %2325, %2326 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%int2048_2585 = torch.constant.int 2048 | |
%int1280_2586 = torch.constant.int 1280 | |
%2328 = torch.prim.ListConstruct %int2048_2585, %int1280_2586 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2329 = torch.aten.view %2327, %2328 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight : tensor<1280x1280xf16> | |
%2330 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_2587 = torch.constant.int 0 | |
%int1_2588 = torch.constant.int 1 | |
%2331 = torch.aten.transpose.int %2330, %int0_2587, %int1_2588 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias : tensor<1280xf16> | |
%2332 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_2589 = torch.constant.int 6 | |
%2333 = torch.prims.convert_element_type %2332, %int6_2589 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_2590 = torch.constant.int 6 | |
%2334 = torch.prims.convert_element_type %2329, %int6_2590 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int6_2591 = torch.constant.int 6 | |
%2335 = torch.prims.convert_element_type %2331, %int6_2591 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32> | |
%2336 = torch.aten.mm %2334, %2335 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32> | |
%int1_2592 = torch.constant.int 1 | |
%2337 = torch.aten.mul.Scalar %2336, %int1_2592 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int1_2593 = torch.constant.int 1 | |
%2338 = torch.aten.mul.Scalar %2333, %int1_2593 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_2594 = torch.constant.int 1 | |
%2339 = torch.aten.add.Tensor %2337, %2338, %int1_2594 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int5_2595 = torch.constant.int 5 | |
%2340 = torch.prims.convert_element_type %2339, %int5_2595 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int2_2596 = torch.constant.int 2 | |
%int1024_2597 = torch.constant.int 1024 | |
%int1280_2598 = torch.constant.int 1280 | |
%2341 = torch.prim.ListConstruct %int2_2596, %int1024_2597, %int1280_2598 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2342 = torch.aten.view %2340, %2341 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%none_2599 = torch.constant.none | |
%2343 = torch.aten.clone %2342, %none_2599 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16> | |
%float1.000000e00_2600 = torch.constant.float 1.000000e+00 | |
%2344 = torch.aten.div.Scalar %2343, %float1.000000e00_2600 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16> | |
%int1_2601 = torch.constant.int 1 | |
%2345 = torch.aten.add.Tensor %2344, %2279, %int1_2601 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int6_2602 = torch.constant.int 6 | |
%2346 = torch.prims.convert_element_type %2345, %int6_2602 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int2_2603 = torch.constant.int 2 | |
%2347 = torch.prim.ListConstruct %int2_2603 : (!torch.int) -> !torch.list<int> | |
%int0_2604 = torch.constant.int 0 | |
%true_2605 = torch.constant.bool true | |
%result0_2606, %result1_2607 = torch.aten.var_mean.correction %2346, %2347, %int0_2604, %true_2605 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32> | |
%float1.000000e-05_2608 = torch.constant.float 1.000000e-05 | |
%int1_2609 = torch.constant.int 1 | |
%2348 = torch.aten.add.Scalar %result0_2606, %float1.000000e-05_2608, %int1_2609 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32> | |
%2349 = torch.aten.rsqrt %2348 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32> | |
%int1_2610 = torch.constant.int 1 | |
%2350 = torch.aten.sub.Tensor %2345, %result1_2607, %int1_2610 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%2351 = torch.aten.mul.Tensor %2350, %2349 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.weight : tensor<1280xf16> | |
%2352 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%2353 = torch.aten.mul.Tensor %2351, %2352 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.bias : tensor<1280xf16> | |
%2354 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_2611 = torch.constant.int 1 | |
%2355 = torch.aten.add.Tensor %2353, %2354, %int1_2611 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int5_2612 = torch.constant.int 5 | |
%2356 = torch.prims.convert_element_type %2355, %int5_2612 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int5_2613 = torch.constant.int 5 | |
%2357 = torch.prims.convert_element_type %result1_2607, %int5_2613 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%int5_2614 = torch.constant.int 5 | |
%2358 = torch.prims.convert_element_type %2349, %int5_2614 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight : tensor<1280x1280xf16> | |
%2359 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_2615 = torch.constant.int 0 | |
%int1_2616 = torch.constant.int 1 | |
%2360 = torch.aten.transpose.int %2359, %int0_2615, %int1_2616 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%int2048_2617 = torch.constant.int 2048 | |
%int1280_2618 = torch.constant.int 1280 | |
%2361 = torch.prim.ListConstruct %int2048_2617, %int1280_2618 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2362 = torch.aten.view %2356, %2361 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%2363 = torch.aten.mm %2362, %2360 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16> | |
%int2_2619 = torch.constant.int 2 | |
%int1024_2620 = torch.constant.int 1024 | |
%int1280_2621 = torch.constant.int 1280 | |
%2364 = torch.prim.ListConstruct %int2_2619, %int1024_2620, %int1280_2621 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2365 = torch.aten.view %2363, %2364 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight : tensor<1280x2048xf16> | |
%2366 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_k.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16> | |
%int0_2622 = torch.constant.int 0 | |
%int1_2623 = torch.constant.int 1 | |
%2367 = torch.aten.transpose.int %2366, %int0_2622, %int1_2623 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int128_2624 = torch.constant.int 128 | |
%int2048_2625 = torch.constant.int 2048 | |
%2368 = torch.prim.ListConstruct %int128_2624, %int2048_2625 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2369 = torch.aten.view %arg1, %2368 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%2370 = torch.aten.mm %2369, %2367 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16> | |
%int2_2626 = torch.constant.int 2 | |
%int64_2627 = torch.constant.int 64 | |
%int1280_2628 = torch.constant.int 1280 | |
%2371 = torch.prim.ListConstruct %int2_2626, %int64_2627, %int1280_2628 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2372 = torch.aten.view %2370, %2371 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight : tensor<1280x2048xf16> | |
%2373 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_v.weight : tensor<1280x2048xf16> -> !torch.vtensor<[1280,2048],f16> | |
%int0_2629 = torch.constant.int 0 | |
%int1_2630 = torch.constant.int 1 | |
%2374 = torch.aten.transpose.int %2373, %int0_2629, %int1_2630 : !torch.vtensor<[1280,2048],f16>, !torch.int, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int128_2631 = torch.constant.int 128 | |
%int2048_2632 = torch.constant.int 2048 | |
%2375 = torch.prim.ListConstruct %int128_2631, %int2048_2632 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2376 = torch.aten.view %arg1, %2375 : !torch.vtensor<[2,64,2048],f16>, !torch.list<int> -> !torch.vtensor<[128,2048],f16> | |
%2377 = torch.aten.mm %2376, %2374 : !torch.vtensor<[128,2048],f16>, !torch.vtensor<[2048,1280],f16> -> !torch.vtensor<[128,1280],f16> | |
%int2_2633 = torch.constant.int 2 | |
%int64_2634 = torch.constant.int 64 | |
%int1280_2635 = torch.constant.int 1280 | |
%2378 = torch.prim.ListConstruct %int2_2633, %int64_2634, %int1280_2635 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2379 = torch.aten.view %2377, %2378 : !torch.vtensor<[128,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,1280],f16> | |
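// As in block 3, the [2,64,2048] -> [128,2048] view of %arg1 and the k/v projections
// are re-emitted for every transformer block rather than hoisted; deduplicating them
// is presumably left to later CSE passes.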
%int2_2636 = torch.constant.int 2 | |
%int-1_2637 = torch.constant.int -1 | |
%int20_2638 = torch.constant.int 20 | |
%int64_2639 = torch.constant.int 64 | |
%2380 = torch.prim.ListConstruct %int2_2636, %int-1_2637, %int20_2638, %int64_2639 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2381 = torch.aten.view %2365, %2380 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16> | |
%int1_2640 = torch.constant.int 1 | |
%int2_2641 = torch.constant.int 2 | |
%2382 = torch.aten.transpose.int %2381, %int1_2640, %int2_2641 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16> | |
%int2_2642 = torch.constant.int 2 | |
%int-1_2643 = torch.constant.int -1 | |
%int20_2644 = torch.constant.int 20 | |
%int64_2645 = torch.constant.int 64 | |
%2383 = torch.prim.ListConstruct %int2_2642, %int-1_2643, %int20_2644, %int64_2645 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2384 = torch.aten.view %2372, %2383 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16> | |
%int1_2646 = torch.constant.int 1 | |
%int2_2647 = torch.constant.int 2 | |
%2385 = torch.aten.transpose.int %2384, %int1_2646, %int2_2647 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16> | |
%int2_2648 = torch.constant.int 2 | |
%int-1_2649 = torch.constant.int -1 | |
%int20_2650 = torch.constant.int 20 | |
%int64_2651 = torch.constant.int 64 | |
%2386 = torch.prim.ListConstruct %int2_2648, %int-1_2649, %int20_2650, %int64_2651 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2387 = torch.aten.view %2379, %2386 : !torch.vtensor<[2,64,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,64,20,64],f16> | |
%int1_2652 = torch.constant.int 1 | |
%int2_2653 = torch.constant.int 2 | |
%2388 = torch.aten.transpose.int %2387, %int1_2652, %int2_2653 : !torch.vtensor<[2,64,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,64,64],f16> | |
%float0.000000e00_2654 = torch.constant.float 0.000000e+00 | |
%false_2655 = torch.constant.bool false | |
%none_2656 = torch.constant.none | |
%none_2657 = torch.constant.none | |
%2389:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%2382, %2385, %2388, %float0.000000e00_2654, %false_2655, %none_2656, %none_2657) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.vtensor<[2,20,64,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>) | |
%2390 = torch.aten.detach %2389#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16> | |
%int1_2658 = torch.constant.int 1 | |
%int2_2659 = torch.constant.int 2 | |
%2391 = torch.aten.transpose.int %2389#0, %int1_2658, %int2_2659 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16> | |
%int2_2660 = torch.constant.int 2 | |
%int-1_2661 = torch.constant.int -1 | |
%int1280_2662 = torch.constant.int 1280 | |
%2392 = torch.prim.ListConstruct %int2_2660, %int-1_2661, %int1280_2662 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2393 = torch.aten.view %2391, %2392 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%int2048_2663 = torch.constant.int 2048 | |
%int1280_2664 = torch.constant.int 1280 | |
%2394 = torch.prim.ListConstruct %int2048_2663, %int1280_2664 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2395 = torch.aten.view %2393, %2394 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight : tensor<1280x1280xf16> | |
%2396 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16> | |
%int0_2665 = torch.constant.int 0 | |
%int1_2666 = torch.constant.int 1 | |
%2397 = torch.aten.transpose.int %2396, %int0_2665, %int1_2666 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias : tensor<1280xf16> | |
%2398 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_2667 = torch.constant.int 6 | |
%2399 = torch.prims.convert_element_type %2398, %int6_2667 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_2668 = torch.constant.int 6 | |
%2400 = torch.prims.convert_element_type %2395, %int6_2668 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int6_2669 = torch.constant.int 6 | |
%2401 = torch.prims.convert_element_type %2397, %int6_2669 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32> | |
%2402 = torch.aten.mm %2400, %2401 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32> | |
%int1_2670 = torch.constant.int 1 | |
%2403 = torch.aten.mul.Scalar %2402, %int1_2670 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int1_2671 = torch.constant.int 1 | |
%2404 = torch.aten.mul.Scalar %2399, %int1_2671 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_2672 = torch.constant.int 1 | |
%2405 = torch.aten.add.Tensor %2403, %2404, %int1_2672 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int5_2673 = torch.constant.int 5 | |
%2406 = torch.prims.convert_element_type %2405, %int5_2673 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int2_2674 = torch.constant.int 2 | |
%int1024_2675 = torch.constant.int 1024 | |
%int1280_2676 = torch.constant.int 1280 | |
%2407 = torch.prim.ListConstruct %int2_2674, %int1024_2675, %int1280_2676 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2408 = torch.aten.view %2406, %2407 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%none_2677 = torch.constant.none | |
%2409 = torch.aten.clone %2408, %none_2677 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16> | |
%float1.000000e00_2678 = torch.constant.float 1.000000e+00 | |
%2410 = torch.aten.div.Scalar %2409, %float1.000000e00_2678 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16> | |
%int1_2679 = torch.constant.int 1 | |
%2411 = torch.aten.add.Tensor %2410, %2345, %int1_2679 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int6_2680 = torch.constant.int 6 | |
%2412 = torch.prims.convert_element_type %2411, %int6_2680 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int2_2681 = torch.constant.int 2 | |
%2413 = torch.prim.ListConstruct %int2_2681 : (!torch.int) -> !torch.list<int> | |
%int0_2682 = torch.constant.int 0 | |
%true_2683 = torch.constant.bool true | |
%result0_2684, %result1_2685 = torch.aten.var_mean.correction %2412, %2413, %int0_2682, %true_2683 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32> | |
%float1.000000e-05_2686 = torch.constant.float 1.000000e-05 | |
%int1_2687 = torch.constant.int 1 | |
%2414 = torch.aten.add.Scalar %result0_2684, %float1.000000e-05_2686, %int1_2687 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32> | |
%2415 = torch.aten.rsqrt %2414 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32> | |
%int1_2688 = torch.constant.int 1 | |
%2416 = torch.aten.sub.Tensor %2411, %result1_2685, %int1_2688 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%2417 = torch.aten.mul.Tensor %2416, %2415 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.weight : tensor<1280xf16> | |
%2418 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%2419 = torch.aten.mul.Tensor %2417, %2418 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.bias : tensor<1280xf16> | |
%2420 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.norm3.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int1_2689 = torch.constant.int 1 | |
%2421 = torch.aten.add.Tensor %2419, %2420, %int1_2689 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32> | |
%int5_2690 = torch.constant.int 5 | |
%2422 = torch.prims.convert_element_type %2421, %int5_2690 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int5_2691 = torch.constant.int 5 | |
%2423 = torch.prims.convert_element_type %result1_2685, %int5_2691 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%int5_2692 = torch.constant.int 5 | |
%2424 = torch.prims.convert_element_type %2415, %int5_2692 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16> | |
%int2048_2693 = torch.constant.int 2048 | |
%int1280_2694 = torch.constant.int 1280 | |
%2425 = torch.prim.ListConstruct %int2048_2693, %int1280_2694 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2426 = torch.aten.view %2422, %2425 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight : tensor<10240x1280xf16> | |
%2427 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight : tensor<10240x1280xf16> -> !torch.vtensor<[10240,1280],f16> | |
%int0_2695 = torch.constant.int 0 | |
%int1_2696 = torch.constant.int 1 | |
%2428 = torch.aten.transpose.int %2427, %int0_2695, %int1_2696 : !torch.vtensor<[10240,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,10240],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias : tensor<10240xf16> | |
%2429 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias : tensor<10240xf16> -> !torch.vtensor<[10240],f16> | |
%int6_2697 = torch.constant.int 6 | |
%2430 = torch.prims.convert_element_type %2429, %int6_2697 : !torch.vtensor<[10240],f16>, !torch.int -> !torch.vtensor<[10240],f32> | |
%int6_2698 = torch.constant.int 6 | |
%2431 = torch.prims.convert_element_type %2426, %int6_2698 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int6_2699 = torch.constant.int 6 | |
%2432 = torch.prims.convert_element_type %2428, %int6_2699 : !torch.vtensor<[1280,10240],f16>, !torch.int -> !torch.vtensor<[1280,10240],f32> | |
%2433 = torch.aten.mm %2431, %2432 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,10240],f32> -> !torch.vtensor<[2048,10240],f32> | |
%int1_2700 = torch.constant.int 1 | |
%2434 = torch.aten.mul.Scalar %2433, %int1_2700 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32> | |
%int1_2701 = torch.constant.int 1 | |
%2435 = torch.aten.mul.Scalar %2430, %int1_2701 : !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[10240],f32> | |
%int1_2702 = torch.constant.int 1 | |
%2436 = torch.aten.add.Tensor %2434, %2435, %int1_2702 : !torch.vtensor<[2048,10240],f32>, !torch.vtensor<[10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f32> | |
%int5_2703 = torch.constant.int 5 | |
%2437 = torch.prims.convert_element_type %2436, %int5_2703 : !torch.vtensor<[2048,10240],f32>, !torch.int -> !torch.vtensor<[2048,10240],f16> | |
%int2_2704 = torch.constant.int 2 | |
%int1024_2705 = torch.constant.int 1024 | |
%int10240_2706 = torch.constant.int 10240 | |
%2438 = torch.prim.ListConstruct %int2_2704, %int1024_2705, %int10240_2706 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2439 = torch.aten.view %2437, %2438 : !torch.vtensor<[2048,10240],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,10240],f16> | |
%int-1_2707 = torch.constant.int -1 | |
%int0_2708 = torch.constant.int 0 | |
%int5120_2709 = torch.constant.int 5120 | |
%int1_2710 = torch.constant.int 1 | |
%2440 = torch.aten.slice.Tensor %2439, %int-1_2707, %int0_2708, %int5120_2709, %int1_2710 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16> | |
%int-1_2711 = torch.constant.int -1 | |
%int5120_2712 = torch.constant.int 5120 | |
%int10240_2713 = torch.constant.int 10240 | |
%int1_2714 = torch.constant.int 1 | |
%2441 = torch.aten.slice.Tensor %2439, %int-1_2711, %int5120_2712, %int10240_2713, %int1_2714 : !torch.vtensor<[2,1024,10240],f16>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[2,1024,5120],f16> | |
%str_2715 = torch.constant.str "none" | |
%2442 = torch.aten.gelu %2441, %str_2715 : !torch.vtensor<[2,1024,5120],f16>, !torch.str -> !torch.vtensor<[2,1024,5120],f16> | |
%2443 = torch.aten.mul.Tensor %2440, %2442 : !torch.vtensor<[2,1024,5120],f16>, !torch.vtensor<[2,1024,5120],f16> -> !torch.vtensor<[2,1024,5120],f16> | |
%none_2716 = torch.constant.none | |
%2444 = torch.aten.clone %2443, %none_2716 : !torch.vtensor<[2,1024,5120],f16>, !torch.none -> !torch.vtensor<[2,1024,5120],f16> | |
%int2048_2717 = torch.constant.int 2048 | |
%int5120_2718 = torch.constant.int 5120 | |
%2445 = torch.prim.ListConstruct %int2048_2717, %int5120_2718 : (!torch.int, !torch.int) -> !torch.list<int> | |
%2446 = torch.aten.view %2444, %2445 : !torch.vtensor<[2,1024,5120],f16>, !torch.list<int> -> !torch.vtensor<[2048,5120],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight : tensor<1280x5120xf16> | |
%2447 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight : tensor<1280x5120xf16> -> !torch.vtensor<[1280,5120],f16> | |
%int0_2719 = torch.constant.int 0 | |
%int1_2720 = torch.constant.int 1 | |
%2448 = torch.aten.transpose.int %2447, %int0_2719, %int1_2720 : !torch.vtensor<[1280,5120],f16>, !torch.int, !torch.int -> !torch.vtensor<[5120,1280],f16> | |
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias : tensor<1280xf16> | |
%2449 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16> | |
%int6_2721 = torch.constant.int 6 | |
%2450 = torch.prims.convert_element_type %2449, %int6_2721 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int6_2722 = torch.constant.int 6 | |
%2451 = torch.prims.convert_element_type %2446, %int6_2722 : !torch.vtensor<[2048,5120],f16>, !torch.int -> !torch.vtensor<[2048,5120],f32> | |
%int6_2723 = torch.constant.int 6 | |
%2452 = torch.prims.convert_element_type %2448, %int6_2723 : !torch.vtensor<[5120,1280],f16>, !torch.int -> !torch.vtensor<[5120,1280],f32> | |
%2453 = torch.aten.mm %2451, %2452 : !torch.vtensor<[2048,5120],f32>, !torch.vtensor<[5120,1280],f32> -> !torch.vtensor<[2048,1280],f32> | |
%int1_2724 = torch.constant.int 1 | |
%2454 = torch.aten.mul.Scalar %2453, %int1_2724 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int1_2725 = torch.constant.int 1 | |
%2455 = torch.aten.mul.Scalar %2450, %int1_2725 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32> | |
%int1_2726 = torch.constant.int 1 | |
%2456 = torch.aten.add.Tensor %2454, %2455, %int1_2726 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32> | |
%int5_2727 = torch.constant.int 5 | |
%2457 = torch.prims.convert_element_type %2456, %int5_2727 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16> | |
%int2_2728 = torch.constant.int 2 | |
%int1024_2729 = torch.constant.int 1024 | |
%int1280_2730 = torch.constant.int 1280 | |
%2458 = torch.prim.ListConstruct %int2_2728, %int1024_2729, %int1280_2730 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int> | |
%2459 = torch.aten.view %2457, %2458 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16> | |
%int1_2731 = torch.constant.int 1 | |
%2460 = torch.aten.add.Tensor %2459, %2411, %int1_2731 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> | |
%int6_2732 = torch.constant.int 6
%2461 = torch.prims.convert_element_type %2460, %int6_2732 : !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int2_2733 = torch.constant.int 2
%2462 = torch.prim.ListConstruct %int2_2733 : (!torch.int) -> !torch.list<int>
%int0_2734 = torch.constant.int 0
%true_2735 = torch.constant.bool true
%result0_2736, %result1_2737 = torch.aten.var_mean.correction %2461, %2462, %int0_2734, %true_2735 : !torch.vtensor<[2,1024,1280],f32>, !torch.list<int>, !torch.int, !torch.bool -> !torch.vtensor<[2,1024,1],f32>, !torch.vtensor<[2,1024,1],f32>
%float1.000000e-05_2738 = torch.constant.float 1.000000e-05
%int1_2739 = torch.constant.int 1
%2463 = torch.aten.add.Scalar %result0_2736, %float1.000000e-05_2738, %int1_2739 : !torch.vtensor<[2,1024,1],f32>, !torch.float, !torch.int -> !torch.vtensor<[2,1024,1],f32>
%2464 = torch.aten.rsqrt %2463 : !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1],f32>
%int1_2740 = torch.constant.int 1
%2465 = torch.aten.sub.Tensor %2460, %result1_2737, %int1_2740 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%2466 = torch.aten.mul.Tensor %2465, %2464 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[2,1024,1],f32> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.weight : tensor<1280xf16>
%2467 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.weight : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%2468 = torch.aten.mul.Tensor %2466, %2467 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16> -> !torch.vtensor<[2,1024,1280],f32>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.bias : tensor<1280xf16>
%2469 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.norm1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int1_2741 = torch.constant.int 1
%2470 = torch.aten.add.Tensor %2468, %2469, %int1_2741 : !torch.vtensor<[2,1024,1280],f32>, !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f32>
%int5_2742 = torch.constant.int 5
%2471 = torch.prims.convert_element_type %2470, %int5_2742 : !torch.vtensor<[2,1024,1280],f32>, !torch.int -> !torch.vtensor<[2,1024,1280],f16>
%int5_2743 = torch.constant.int 5
%2472 = torch.prims.convert_element_type %result1_2737, %int5_2743 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
%int5_2744 = torch.constant.int 5
%2473 = torch.prims.convert_element_type %2464, %int5_2744 : !torch.vtensor<[2,1024,1],f32>, !torch.int -> !torch.vtensor<[2,1024,1],f16>
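// attn1 (self-attention) query projection: normed input flattened to [2048,1280], f16 matmul with transposed to_q weight (no bias), reshaped back to [2,1024,1280]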
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight : tensor<1280x1280xf16>
%2474 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_q.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2745 = torch.constant.int 0
%int1_2746 = torch.constant.int 1
%2475 = torch.aten.transpose.int %2474, %int0_2745, %int1_2746 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2747 = torch.constant.int 2048
%int1280_2748 = torch.constant.int 1280
%2476 = torch.prim.ListConstruct %int2048_2747, %int1280_2748 : (!torch.int, !torch.int) -> !torch.list<int>
%2477 = torch.aten.view %2471, %2476 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%2478 = torch.aten.mm %2477, %2475 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2749 = torch.constant.int 2
%int1024_2750 = torch.constant.int 1024
%int1280_2751 = torch.constant.int 1280
%2479 = torch.prim.ListConstruct %int2_2749, %int1024_2750, %int1280_2751 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2480 = torch.aten.view %2478, %2479 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
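// attn1 key projection (to_k), same pattern as the query path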
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight : tensor<1280x1280xf16>
%2481 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_k.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2752 = torch.constant.int 0
%int1_2753 = torch.constant.int 1
%2482 = torch.aten.transpose.int %2481, %int0_2752, %int1_2753 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2754 = torch.constant.int 2048
%int1280_2755 = torch.constant.int 1280
%2483 = torch.prim.ListConstruct %int2048_2754, %int1280_2755 : (!torch.int, !torch.int) -> !torch.list<int>
%2484 = torch.aten.view %2471, %2483 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%2485 = torch.aten.mm %2484, %2482 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2756 = torch.constant.int 2
%int1024_2757 = torch.constant.int 1024
%int1280_2758 = torch.constant.int 1280
%2486 = torch.prim.ListConstruct %int2_2756, %int1024_2757, %int1280_2758 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2487 = torch.aten.view %2485, %2486 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
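// attn1 value projection (to_v), same pattern as the query path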
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight : tensor<1280x1280xf16>
%2488 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_v.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2759 = torch.constant.int 0
%int1_2760 = torch.constant.int 1
%2489 = torch.aten.transpose.int %2488, %int0_2759, %int1_2760 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%int2048_2761 = torch.constant.int 2048
%int1280_2762 = torch.constant.int 1280
%2490 = torch.prim.ListConstruct %int2048_2761, %int1280_2762 : (!torch.int, !torch.int) -> !torch.list<int>
%2491 = torch.aten.view %2471, %2490 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
%2492 = torch.aten.mm %2491, %2489 : !torch.vtensor<[2048,1280],f16>, !torch.vtensor<[1280,1280],f16> -> !torch.vtensor<[2048,1280],f16>
%int2_2763 = torch.constant.int 2
%int1024_2764 = torch.constant.int 1024
%int1280_2765 = torch.constant.int 1280
%2493 = torch.prim.ListConstruct %int2_2763, %int1024_2764, %int1280_2765 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2494 = torch.aten.view %2492, %2493 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
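// split heads: view Q, K, and V from [2,1024,1280] to [2,1024,20,64] and transpose to [2,20,1024,64] (20 heads, head dim 64)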
%int2_2766 = torch.constant.int 2
%int-1_2767 = torch.constant.int -1
%int20_2768 = torch.constant.int 20
%int64_2769 = torch.constant.int 64
%2495 = torch.prim.ListConstruct %int2_2766, %int-1_2767, %int20_2768, %int64_2769 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2496 = torch.aten.view %2480, %2495 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2770 = torch.constant.int 1
%int2_2771 = torch.constant.int 2
%2497 = torch.aten.transpose.int %2496, %int1_2770, %int2_2771 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_2772 = torch.constant.int 2
%int-1_2773 = torch.constant.int -1
%int20_2774 = torch.constant.int 20
%int64_2775 = torch.constant.int 64
%2498 = torch.prim.ListConstruct %int2_2772, %int-1_2773, %int20_2774, %int64_2775 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2499 = torch.aten.view %2487, %2498 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2776 = torch.constant.int 1
%int2_2777 = torch.constant.int 2
%2500 = torch.aten.transpose.int %2499, %int1_2776, %int2_2777 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
%int2_2778 = torch.constant.int 2
%int-1_2779 = torch.constant.int -1
%int20_2780 = torch.constant.int 20
%int64_2781 = torch.constant.int 64
%2501 = torch.prim.ListConstruct %int2_2778, %int-1_2779, %int20_2780, %int64_2781 : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2502 = torch.aten.view %2494, %2501 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,20,64],f16>
%int1_2782 = torch.constant.int 1
%int2_2783 = torch.constant.int 2
%2503 = torch.aten.transpose.int %2502, %int1_2782, %int2_2783 : !torch.vtensor<[2,1024,20,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,20,1024,64],f16>
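// scaled dot-product attention via the CPU flash-attention kernel: dropout_p 0.0, non-causal; result 0 is the f16 attention output, result 1 the f32 logsumexp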
%float0.000000e00_2784 = torch.constant.float 0.000000e+00
%false_2785 = torch.constant.bool false
%none_2786 = torch.constant.none
%none_2787 = torch.constant.none
%2504:2 = torch.operator "torch.aten._scaled_dot_product_flash_attention_for_cpu"(%2497, %2500, %2503, %float0.000000e00_2784, %false_2785, %none_2786, %none_2787) : (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024,64],f16>, !torch.float, !torch.bool, !torch.none, !torch.none) -> (!torch.vtensor<[2,20,1024,64],f16>, !torch.vtensor<[2,20,1024],f32>)
%2505 = torch.aten.detach %2504#0 : !torch.vtensor<[2,20,1024,64],f16> -> !torch.vtensor<[2,20,1024,64],f16>
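// merge heads: transpose back to [2,1024,20,64], flatten to [2,1024,1280], then to [2048,1280] for the output projection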
%int1_2788 = torch.constant.int 1
%int2_2789 = torch.constant.int 2
%2506 = torch.aten.transpose.int %2504#0, %int1_2788, %int2_2789 : !torch.vtensor<[2,20,1024,64],f16>, !torch.int, !torch.int -> !torch.vtensor<[2,1024,20,64],f16>
%int2_2790 = torch.constant.int 2
%int-1_2791 = torch.constant.int -1
%int1280_2792 = torch.constant.int 1280
%2507 = torch.prim.ListConstruct %int2_2790, %int-1_2791, %int1280_2792 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2508 = torch.aten.view %2506, %2507 : !torch.vtensor<[2,1024,20,64],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
%int2048_2793 = torch.constant.int 2048
%int1280_2794 = torch.constant.int 1280
%2509 = torch.prim.ListConstruct %int2048_2793, %int1280_2794 : (!torch.int, !torch.int) -> !torch.list<int>
%2510 = torch.aten.view %2508, %2509 : !torch.vtensor<[2,1024,1280],f16>, !torch.list<int> -> !torch.vtensor<[2048,1280],f16>
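// attn1 output projection (to_out.0): matmul and bias accumulated in f32, result cast back to f16 and reshaped to [2,1024,1280]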
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight : tensor<1280x1280xf16>
%2511 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%int0_2795 = torch.constant.int 0
%int1_2796 = torch.constant.int 1
%2512 = torch.aten.transpose.int %2511, %int0_2795, %int1_2796 : !torch.vtensor<[1280,1280],f16>, !torch.int, !torch.int -> !torch.vtensor<[1280,1280],f16>
%_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias = util.global.load @_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias : tensor<1280xf16>
%2513 = torch_c.from_builtin_tensor %_params.unet.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%int6_2797 = torch.constant.int 6
%2514 = torch.prims.convert_element_type %2513, %int6_2797 : !torch.vtensor<[1280],f16>, !torch.int -> !torch.vtensor<[1280],f32>
%int6_2798 = torch.constant.int 6
%2515 = torch.prims.convert_element_type %2510, %int6_2798 : !torch.vtensor<[2048,1280],f16>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int6_2799 = torch.constant.int 6
%2516 = torch.prims.convert_element_type %2512, %int6_2799 : !torch.vtensor<[1280,1280],f16>, !torch.int -> !torch.vtensor<[1280,1280],f32>
%2517 = torch.aten.mm %2515, %2516 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280,1280],f32> -> !torch.vtensor<[2048,1280],f32>
%int1_2800 = torch.constant.int 1
%2518 = torch.aten.mul.Scalar %2517, %int1_2800 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int1_2801 = torch.constant.int 1
%2519 = torch.aten.mul.Scalar %2514, %int1_2801 : !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[1280],f32>
%int1_2802 = torch.constant.int 1
%2520 = torch.aten.add.Tensor %2518, %2519, %int1_2802 : !torch.vtensor<[2048,1280],f32>, !torch.vtensor<[1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f32>
%int5_2803 = torch.constant.int 5
%2521 = torch.prims.convert_element_type %2520, %int5_2803 : !torch.vtensor<[2048,1280],f32>, !torch.int -> !torch.vtensor<[2048,1280],f16>
%int2_2804 = torch.constant.int 2
%int1024_2805 = torch.constant.int 1024
%int1280_2806 = torch.constant.int 1280
%2522 = torch.prim.ListConstruct %int2_2804, %int1024_2805, %int1280_2806 : (!torch.int, !torch.int, !torch.int) -> !torch.list<int>
%2523 = torch.aten.view %2521, %2522 : !torch.vtensor<[2048,1280],f16>, !torch.list<int> -> !torch.vtensor<[2,1024,1280],f16>
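// clone (likely the traced eval-mode dropout, a no-op), divide by output rescale factor 1.0, then the attention residual add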
%none_2807 = torch.constant.none
%2524 = torch.aten.clone %2523, %none_2807 : !torch.vtensor<[2,1024,1280],f16>, !torch.none -> !torch.vtensor<[2,1024,1280],f16>
%float1.000000e00_2808 = torch.constant.float 1.000000e+00
%2525 = torch.aten.div.Scalar %2524, %float1.000000e00_2808 : !torch.vtensor<[2,1024,1280],f16>, !torch.float -> !torch.vtensor<[2,1024,1280],f16>
%int1_2809 = torch.constant.int 1
%2526 = torch.aten.add.Tensor %2525, %2460, %int1_2809 : !torch.vtensor<[2,1024,1280],f16>, !torch.vtensor<[2,1024,1280],f16>, !torch.int -> !torch.vtensor<[2,1024,1280],f16> // assumed completion: the gist is truncated mid-line here; operands after %2525 are reconstructed from the recurring residual-add pattern above