Skip to content

Instantly share code, notes, and snippets.

@pashu123
Created March 10, 2025 16:58
Show Gist options
  • Save pashu123/b4bfddf40a85ce44d46fff0ce138ddcc to your computer and use it in GitHub Desktop.
This file has been truncated, but you can view the full file.
// Attribute aliases for affine maps referenced by ops later in this
// (truncated) module. Both take four dims (d0..d3):
//   #map  — 4-D identity map (d0, d1, d2, d3) -> (d0, d1, d2, d3);
//           typically the iteration map of an elementwise linalg op.
//   #map1 — projects out all dims except d1: (d0, d1, d2, d3) -> (d1);
//           presumably a per-channel broadcast (e.g. bias) — the consuming
//           ops are beyond this chunk, so confirm against the full file.
#map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map1 = affine_map<(d0, d1, d2, d3) -> (d1)>
module @module {
util.global private @__auto.time_embedding.linear_1.premul_input = #stream.parameter.named<"model"::"time_embedding.linear_1.premul_input"> : tensor<1x320xf16>
util.global private @__auto.time_embedding.linear_1.weight = #stream.parameter.named<"model"::"time_embedding.linear_1.weight"> : tensor<1280x320xf16>
util.global private @__auto.time_embedding.linear_1.bias = #stream.parameter.named<"model"::"time_embedding.linear_1.bias"> : tensor<1280xf16>
util.global private @__auto.time_embedding.linear_2.premul_input = #stream.parameter.named<"model"::"time_embedding.linear_2.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.time_embedding.linear_2.weight = #stream.parameter.named<"model"::"time_embedding.linear_2.weight"> : tensor<1280x1280xf16>
util.global private @__auto.time_embedding.linear_2.bias = #stream.parameter.named<"model"::"time_embedding.linear_2.bias"> : tensor<1280xf16>
util.global private @__auto.add_embedding.linear_1.premul_input = #stream.parameter.named<"model"::"add_embedding.linear_1.premul_input"> : tensor<1x2816xf16>
util.global private @__auto.add_embedding.linear_1.weight = #stream.parameter.named<"model"::"add_embedding.linear_1.weight"> : tensor<1280x2816xf16>
util.global private @__auto.add_embedding.linear_1.bias = #stream.parameter.named<"model"::"add_embedding.linear_1.bias"> : tensor<1280xf16>
util.global private @__auto.add_embedding.linear_2.premul_input = #stream.parameter.named<"model"::"add_embedding.linear_2.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.add_embedding.linear_2.weight = #stream.parameter.named<"model"::"add_embedding.linear_2.weight"> : tensor<1280x1280xf16>
util.global private @__auto.add_embedding.linear_2.bias = #stream.parameter.named<"model"::"add_embedding.linear_2.bias"> : tensor<1280xf16>
util.global private @__auto.conv_in.premul_input = #stream.parameter.named<"model"::"conv_in.premul_input"> : tensor<1x4x1x1xf16>
util.global private @__auto.conv_in.weight = #stream.parameter.named<"model"::"conv_in.weight"> : tensor<320x4x3x3xf16>
util.global private @__auto.conv_in.bias = #stream.parameter.named<"model"::"conv_in.bias"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.0.norm1.weight = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.norm1.weight"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.0.norm1.bias = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.norm1.bias"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.0.conv1.premul_input = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv1.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.down_blocks.0.resnets.0.conv1.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.resnets.0.conv1.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.resnets.0.conv1.weight:qs" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv1.weight:qs"> : tensor<320x320x3x3xi8>
util.global private @"__auto.down_blocks.0.resnets.0.conv1.bias:qs" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv1.bias:qs"> : tensor<320xi32>
util.global private @"__auto.down_blocks.0.resnets.0.conv1.bias:d" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv1.bias:d"> : tensor<320xf32>
util.global private @"__auto.down_blocks.0.resnets.0.conv1.weight:d" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv1.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.down_blocks.0.resnets.0.time_emb_proj.premul_input = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.down_blocks.0.resnets.0.time_emb_proj.weight = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @__auto.down_blocks.0.resnets.0.time_emb_proj.bias = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.time_emb_proj.bias"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.0.norm2.weight = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.norm2.weight"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.0.norm2.bias = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.norm2.bias"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.0.conv2.premul_input = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv2.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.down_blocks.0.resnets.0.conv2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.resnets.0.conv2.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.resnets.0.conv2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv2.weight:qs"> : tensor<320x320x3x3xi8>
util.global private @"__auto.down_blocks.0.resnets.0.conv2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv2.bias:qs"> : tensor<320xi32>
util.global private @"__auto.down_blocks.0.resnets.0.conv2.bias:d" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv2.bias:d"> : tensor<320xf32>
util.global private @"__auto.down_blocks.0.resnets.0.conv2.weight:d" = #stream.parameter.named<"model"::"down_blocks.0.resnets.0.conv2.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.down_blocks.0.resnets.1.norm1.weight = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.norm1.weight"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.1.norm1.bias = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.norm1.bias"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.1.conv1.premul_input = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv1.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.down_blocks.0.resnets.1.conv1.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.resnets.1.conv1.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.resnets.1.conv1.weight:qs" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv1.weight:qs"> : tensor<320x320x3x3xi8>
util.global private @"__auto.down_blocks.0.resnets.1.conv1.bias:qs" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv1.bias:qs"> : tensor<320xi32>
util.global private @"__auto.down_blocks.0.resnets.1.conv1.bias:d" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv1.bias:d"> : tensor<320xf32>
util.global private @"__auto.down_blocks.0.resnets.1.conv1.weight:d" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv1.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.down_blocks.0.resnets.1.time_emb_proj.premul_input = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.down_blocks.0.resnets.1.time_emb_proj.weight = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @__auto.down_blocks.0.resnets.1.time_emb_proj.bias = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.time_emb_proj.bias"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.1.norm2.weight = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.norm2.weight"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.1.norm2.bias = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.norm2.bias"> : tensor<320xf16>
util.global private @__auto.down_blocks.0.resnets.1.conv2.premul_input = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv2.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.down_blocks.0.resnets.1.conv2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.resnets.1.conv2.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.resnets.1.conv2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv2.weight:qs"> : tensor<320x320x3x3xi8>
util.global private @"__auto.down_blocks.0.resnets.1.conv2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv2.bias:qs"> : tensor<320xi32>
util.global private @"__auto.down_blocks.0.resnets.1.conv2.bias:d" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv2.bias:d"> : tensor<320xf32>
util.global private @"__auto.down_blocks.0.resnets.1.conv2.weight:d" = #stream.parameter.named<"model"::"down_blocks.0.resnets.1.conv2.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.down_blocks.0.downsamplers.0.conv.premul_input = #stream.parameter.named<"model"::"down_blocks.0.downsamplers.0.conv.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.down_blocks.0.downsamplers.0.conv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.0.downsamplers.0.conv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.downsamplers.0.conv.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.0.downsamplers.0.conv.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.0.downsamplers.0.conv.weight:qs" = #stream.parameter.named<"model"::"down_blocks.0.downsamplers.0.conv.weight:qs"> : tensor<320x320x3x3xi8>
util.global private @"__auto.down_blocks.0.downsamplers.0.conv.bias:qs" = #stream.parameter.named<"model"::"down_blocks.0.downsamplers.0.conv.bias:qs"> : tensor<320xi32>
util.global private @"__auto.down_blocks.0.downsamplers.0.conv.bias:d" = #stream.parameter.named<"model"::"down_blocks.0.downsamplers.0.conv.bias:d"> : tensor<320xf32>
util.global private @"__auto.down_blocks.0.downsamplers.0.conv.weight:d" = #stream.parameter.named<"model"::"down_blocks.0.downsamplers.0.conv.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.down_blocks.1.resnets.0.norm1.weight = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.norm1.weight"> : tensor<320xf16>
util.global private @__auto.down_blocks.1.resnets.0.norm1.bias = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.norm1.bias"> : tensor<320xf16>
util.global private @__auto.down_blocks.1.resnets.0.conv1.premul_input = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv1.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.down_blocks.1.resnets.0.conv1.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.0.conv1.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.0.conv1.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv1.weight:qs"> : tensor<640x320x3x3xi8>
util.global private @"__auto.down_blocks.1.resnets.0.conv1.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv1.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.resnets.0.conv1.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv1.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.resnets.0.conv1.weight:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv1.weight:d"> : tensor<640x1x1x1xf32>
util.global private @__auto.down_blocks.1.resnets.0.time_emb_proj.premul_input = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.down_blocks.1.resnets.0.time_emb_proj.weight = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @__auto.down_blocks.1.resnets.0.time_emb_proj.bias = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.time_emb_proj.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.resnets.0.norm2.weight = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.norm2.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.resnets.0.norm2.bias = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.norm2.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.resnets.0.conv2.premul_input = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv2.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.down_blocks.1.resnets.0.conv2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.0.conv2.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.0.conv2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv2.weight:qs"> : tensor<640x640x3x3xi8>
util.global private @"__auto.down_blocks.1.resnets.0.conv2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.resnets.0.conv2.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv2.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.resnets.0.conv2.weight:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv2.weight:d"> : tensor<640x1x1x1xf32>
util.global private @__auto.down_blocks.1.resnets.0.conv_shortcut.premul_input = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv_shortcut.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.down_blocks.1.resnets.0.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.0.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.0.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv_shortcut.weight:qs"> : tensor<640x320x1x1xi8>
util.global private @"__auto.down_blocks.1.resnets.0.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv_shortcut.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.resnets.0.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv_shortcut.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.resnets.0.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.0.conv_shortcut.weight:d"> : tensor<640x1x1x1xf32>
util.global private @__auto.down_blocks.1.attentions.0.norm.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.norm.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.norm.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.norm.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.proj_in.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_in.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.proj_in.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.proj_in.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_in.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.proj_in.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_in.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.proj_in.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_in.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.0.proj_out.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_out.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.0.proj_out.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.0.proj_out.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_out.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.0.proj_out.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_out.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.0.proj_out.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.0.proj_out.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.resnets.1.norm1.weight = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.norm1.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.resnets.1.norm1.bias = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.norm1.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.resnets.1.conv1.premul_input = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv1.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.down_blocks.1.resnets.1.conv1.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.1.conv1.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.1.conv1.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv1.weight:qs"> : tensor<640x640x3x3xi8>
util.global private @"__auto.down_blocks.1.resnets.1.conv1.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv1.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.resnets.1.conv1.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv1.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.resnets.1.conv1.weight:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv1.weight:d"> : tensor<640x1x1x1xf32>
util.global private @__auto.down_blocks.1.resnets.1.time_emb_proj.premul_input = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.down_blocks.1.resnets.1.time_emb_proj.weight = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @__auto.down_blocks.1.resnets.1.time_emb_proj.bias = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.time_emb_proj.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.resnets.1.norm2.weight = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.norm2.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.resnets.1.norm2.bias = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.norm2.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.resnets.1.conv2.premul_input = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv2.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.down_blocks.1.resnets.1.conv2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.1.conv2.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.resnets.1.conv2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv2.weight:qs"> : tensor<640x640x3x3xi8>
util.global private @"__auto.down_blocks.1.resnets.1.conv2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.resnets.1.conv2.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv2.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.resnets.1.conv2.weight:d" = #stream.parameter.named<"model"::"down_blocks.1.resnets.1.conv2.weight:d"> : tensor<640x1x1x1xf32>
util.global private @__auto.down_blocks.1.attentions.1.norm.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.norm.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.norm.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.norm.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.proj_in.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_in.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.proj_in.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.proj_in.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_in.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.proj_in.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_in.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.proj_in.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_in.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
// --- down_blocks.1.attentions.1.transformer_blocks.1.attn2 (cross-attention) ---
// Naming scheme observed throughout this file:
//   *.weight:qs / *.bias:qs  -> integer quantized values (i8 weights, i32 biases)
//   *.bias:d / *.weight:d    -> f32 tensors, presumably per-channel dequant scales — TODO confirm
//   *.q_input:scale          -> scalar f32, presumably the input quantization scale — TODO confirm
//   *.premul_input           -> f16 tensor multiplied into the input before the op (per its name)
// K/V share one fused "to_kv" input scale; Q has its own (to_kv.q_input:scale vs. to_q below).
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<640xf32>
// K/V project from a 2048-wide context (text-encoder states, per the 2048 inner dim) down to 640.
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
// Output projection of attn2 (640 -> 640).
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<640xf32>
// --- transformer_blocks.1 feed-forward: norm3 (f16 LayerNorm params) + GEGLU-style MLP ---
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
// 640 -> 5120 expansion (8x; consistent with gated activation producing 2560 features).
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
// 2560 -> 640 contraction back to the block's hidden size.
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias:d"> : tensor<640xf32>
// --- down_blocks.1.attentions.1.proj_out: final linear projection of the attention block ---
util.global private @__auto.down_blocks.1.attentions.1.proj_out.premul_input = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_out.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.down_blocks.1.attentions.1.proj_out.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.attentions.1.proj_out.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_out.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.down_blocks.1.attentions.1.proj_out.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_out.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.attentions.1.proj_out.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.attentions.1.proj_out.bias:d"> : tensor<640xf32>
// --- down_blocks.1.downsamplers.0.conv: strided 3x3 conv (640 -> 640) ---
// Conv globals additionally carry q_input:rscale (presumably the reciprocal input scale — TODO confirm)
// and a per-output-channel weight:d of shape <Cout x 1 x 1 x 1>.
util.global private @__auto.down_blocks.1.downsamplers.0.conv.premul_input = #stream.parameter.named<"model"::"down_blocks.1.downsamplers.0.conv.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.down_blocks.1.downsamplers.0.conv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.1.downsamplers.0.conv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.downsamplers.0.conv.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.1.downsamplers.0.conv.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.1.downsamplers.0.conv.weight:qs" = #stream.parameter.named<"model"::"down_blocks.1.downsamplers.0.conv.weight:qs"> : tensor<640x640x3x3xi8>
util.global private @"__auto.down_blocks.1.downsamplers.0.conv.bias:qs" = #stream.parameter.named<"model"::"down_blocks.1.downsamplers.0.conv.bias:qs"> : tensor<640xi32>
util.global private @"__auto.down_blocks.1.downsamplers.0.conv.bias:d" = #stream.parameter.named<"model"::"down_blocks.1.downsamplers.0.conv.bias:d"> : tensor<640xf32>
util.global private @"__auto.down_blocks.1.downsamplers.0.conv.weight:d" = #stream.parameter.named<"model"::"down_blocks.1.downsamplers.0.conv.weight:d"> : tensor<640x1x1x1xf32>
// === down_blocks.2.resnets.0: ResNet block widening 640 -> 1280 channels ===
// norm1 normalizes the 640-channel input (f16 affine params).
util.global private @__auto.down_blocks.2.resnets.0.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.norm1.weight"> : tensor<640xf16>
util.global private @__auto.down_blocks.2.resnets.0.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.norm1.bias"> : tensor<640xf16>
// conv1: quantized 3x3 conv, 640 -> 1280. qs = i8 quantized weights / i32 biases;
// d = f32 tensors, presumably dequant scales; scale/rscale = scalar input quant params — TODO confirm.
util.global private @__auto.down_blocks.2.resnets.0.conv1.premul_input = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv1.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.down_blocks.2.resnets.0.conv1.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.0.conv1.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.0.conv1.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv1.weight:qs"> : tensor<1280x640x3x3xi8>
util.global private @"__auto.down_blocks.2.resnets.0.conv1.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv1.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.resnets.0.conv1.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv1.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.resnets.0.conv1.weight:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv1.weight:d"> : tensor<1280x1x1x1xf32>
// time_emb_proj stays unquantized f16 (contrast with the i8 conv weights above).
util.global private @__auto.down_blocks.2.resnets.0.time_emb_proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.down_blocks.2.resnets.0.time_emb_proj.weight = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @__auto.down_blocks.2.resnets.0.time_emb_proj.bias = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.time_emb_proj.bias"> : tensor<1280xf16>
// norm2 + conv2: second normalization and quantized 3x3 conv at 1280 channels.
util.global private @__auto.down_blocks.2.resnets.0.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.resnets.0.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.resnets.0.conv2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv2.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.down_blocks.2.resnets.0.conv2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.0.conv2.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.0.conv2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv2.weight:qs"> : tensor<1280x1280x3x3xi8>
util.global private @"__auto.down_blocks.2.resnets.0.conv2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.resnets.0.conv2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv2.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.resnets.0.conv2.weight:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv2.weight:d"> : tensor<1280x1x1x1xf32>
// conv_shortcut: 1x1 conv on the residual path (640 -> 1280), needed because the
// channel count changes across this ResNet block.
util.global private @__auto.down_blocks.2.resnets.0.conv_shortcut.premul_input = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv_shortcut.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.down_blocks.2.resnets.0.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.0.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.0.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv_shortcut.weight:qs"> : tensor<1280x640x1x1xi8>
util.global private @"__auto.down_blocks.2.resnets.0.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv_shortcut.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.resnets.0.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv_shortcut.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.resnets.0.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.0.conv_shortcut.weight:d"> : tensor<1280x1x1x1xf32>
// === down_blocks.2.attentions.0: transformer stage at 1280 channels ===
// Entry normalization + quantized 1280x1280 input projection (proj_in).
util.global private @__auto.down_blocks.2.attentions.0.norm.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.norm.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.norm.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.norm.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.proj_in.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_in.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.proj_in.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.proj_in.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_in.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.proj_in.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_in.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.proj_in.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_in.bias:d"> : tensor<1280xf32>
// --- transformer_blocks.0.attn1 (self-attention): fused to_qkv with one shared input scale,
// separate per-projection i8 weights / i32 biases / f32 "d" tensors.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection (1280 -> 1280).
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// --- transformer_blocks.0.attn2 (cross-attention): Q from the 1280-dim hidden state,
// K/V from a 2048-dim context; note separate to_q vs. to_kv input scales.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// --- transformer_blocks.0 feed-forward: norm3 + 1280 -> 10240 expansion, 5120 -> 1280 contraction
// (premul_input of ff.net.2 is 5120-wide, consistent with a gated activation halving 10240).
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.0.ff.net.2.bias:d"> : tensor<1280xf32>
// --- transformer_blocks.1: structurally identical to transformer_blocks.0 above
// (self-attn with fused to_qkv, cross-attn with 2048-dim K/V context, GEGLU-style FF).
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
// attn1 (self-attention): shared input scale for the fused Q/K/V projection.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// attn2 (cross-attention): Q from 1280-dim hidden state, K/V from 2048-dim context.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// Feed-forward: norm3 + 1280 -> 10240 expansion. (The matching ff.net.2 contraction
// continues past this section of the file.)
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.1.ff.net.2.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.2.ff.net.2.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.3.ff.net.2.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
// ---- down_blocks.2.attentions.0.transformer_blocks.4 (continued) ----
// Naming convention (generated): ":qs" = quantized storage (i8 weights / i32
// biases), ":d" = f32 dequant scale for the matching ":qs" tensor,
// "q_input:scale" = per-tensor f32 scale for quantizing the layer input,
// "premul_input" = f16 multiplier applied to the input before quantization.
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// LayerNorm before cross-attention (kept in f16, not quantized).
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.norm2.bias"> : tensor<1280xf16>
// attn2 (cross-attention): to_q consumes the 1280-wide hidden state; to_kv is
// a fused K/V projection over a 2048-wide context input (see the 1280x2048
// weight shapes below) — presumably the text-encoder embedding; verify
// against the caller.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection (1280 -> 1280, int8-quantized).
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// LayerNorm before the feed-forward sublayer.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.norm3.bias"> : tensor<1280xf16>
// Feed-forward: proj is 1280 -> 10240 while net.2 consumes 5120, i.e.
// 10240 = 2x5120 — consistent with a gated (GEGLU-style) activation between
// them; inferred from shapes only, confirm against the model definition.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.4.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- down_blocks.2.attentions.0.transformer_blocks.5 ----
// Same layout as the other transformer blocks: norm1 -> attn1 (self-attention,
// fused to_qkv over the 1280-wide hidden state) -> norm2 -> attn2
// (cross-attention, fused to_kv over a 2048-wide context) -> norm3 -> ff.
// Suffixes: ":qs" = quantized storage (i8/i32), ":d" = f32 dequant scale,
// "q_input:scale" = input quant scale, "premul_input" = f16 pre-multiplier.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V projection sharing one input scale; per-projection
// int8 weights and quantized biases.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention; K/V projections read a 2048-wide context input.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, activation narrows to 5120
// (10240 = 2x5120, consistent with a gated/GEGLU-style split — inferred
// from shapes), then 5120 -> 1280 output projection.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.5.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- down_blocks.2.attentions.0.transformer_blocks.6 ----
// Identical structure to transformer_blocks.5: norm1 -> attn1 (self-attn,
// fused to_qkv, 1280-wide) -> norm2 -> attn2 (cross-attn, fused to_kv,
// 2048-wide context) -> norm3 -> ff. Suffix conventions: ":qs" quantized
// storage, ":d" f32 dequant scale, "q_input:scale" input quant scale,
// "premul_input" f16 pre-multiplier.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V self-attention projections (shared input scale).
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention; K/V projections read a 2048-wide context input.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, activation narrows to 5120
// (10240 = 2x5120, consistent with a gated/GEGLU-style split — inferred
// from shapes), then 5120 -> 1280 output projection.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.6.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- down_blocks.2.attentions.0.transformer_blocks.7 (continues past this
// chunk) ---- Same per-block layout as transformer_blocks.5/6.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V self-attention projections; ":qs" = quantized storage
// (i8 weights / i32 biases), ":d" = f32 dequant scale, "q_input:scale" =
// input quant scale, "premul_input" = f16 pre-multiplier.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention; K/V read a 2048-wide context input.
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.7.ff.net.2.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.8.ff.net.2.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.transformer_blocks.9.ff.net.2.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.0.proj_out.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_out.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.0.proj_out.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.0.proj_out.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_out.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.0.proj_out.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_out.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.0.proj_out.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.0.proj_out.bias:d"> : tensor<1280xf32>
// down_blocks.2.resnets.1 — ResNet block parameters (1280 channels):
//   norm1/norm2        f16 affine (weight, bias), 1280 channels each
//   conv1/conv2        quantized 3x3 convolutions, 1280 -> 1280; each has
//                      per-output-channel f32 weight scales (weight:d,
//                      tensor<1280x1x1x1xf32>) plus both a forward
//                      (q_input:scale) and what appears to be a reciprocal
//                      (q_input:rscale) input scale — confirm semantics
//                      against the quantization exporter
//   time_emb_proj      unquantized f16 linear 1280 -> 1280 projecting the
//                      timestep embedding into this block
// premul_input tensors are f16 and presumably element-wise pre-multipliers
// applied to the conv inputs (broadcast over H/W given the 1x1280x1x1 shape).
util.global private @__auto.down_blocks.2.resnets.1.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.resnets.1.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.resnets.1.conv1.premul_input = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv1.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.down_blocks.2.resnets.1.conv1.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.1.conv1.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.1.conv1.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv1.weight:qs"> : tensor<1280x1280x3x3xi8>
util.global private @"__auto.down_blocks.2.resnets.1.conv1.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv1.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.resnets.1.conv1.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv1.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.resnets.1.conv1.weight:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv1.weight:d"> : tensor<1280x1x1x1xf32>
util.global private @__auto.down_blocks.2.resnets.1.time_emb_proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.down_blocks.2.resnets.1.time_emb_proj.weight = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @__auto.down_blocks.2.resnets.1.time_emb_proj.bias = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.time_emb_proj.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.resnets.1.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.resnets.1.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.resnets.1.conv2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv2.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.down_blocks.2.resnets.1.conv2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.1.conv2.q_input:rscale" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.resnets.1.conv2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv2.weight:qs"> : tensor<1280x1280x3x3xi8>
util.global private @"__auto.down_blocks.2.resnets.1.conv2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.resnets.1.conv2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv2.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.resnets.1.conv2.weight:d" = #stream.parameter.named<"model"::"down_blocks.2.resnets.1.conv2.weight:d"> : tensor<1280x1x1x1xf32>
// down_blocks.2.attentions.1 — spatial transformer entry: f16 norm affine
// parameters and the quantized input projection (1280 -> 1280, i8 weights,
// i32 bias with f32 bias scales). proj_in.premul_input is f16 and presumably
// an element-wise pre-multiplier fused into the projection input.
util.global private @__auto.down_blocks.2.attentions.1.norm.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.norm.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.norm.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.norm.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.proj_in.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_in.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.proj_in.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.proj_in.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_in.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.proj_in.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_in.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.proj_in.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_in.bias:d"> : tensor<1280xf32>
// down_blocks.2.attentions.1.transformer_blocks.0 — one transformer block:
//   norm1/norm2/norm3  f16 affine parameters (1280 channels)
//   attn1              self-attention; q/k/v share one fused input group
//                      (to_qkv: single premul_input + q_input:scale) but keep
//                      separate 1280x1280 i8 weight tensors, followed by the
//                      quantized to_out.0 projection
//   attn2              cross-attention; to_q consumes the 1280-dim hidden
//                      state while to_kv projects a 2048-dim input (weights
//                      are 1280x2048 — presumably the text-encoder context)
//   ff                 net.0.proj expands 1280 -> 10240 and net.2 contracts
//                      5120 -> 1280; the 2x ratio suggests a gated (e.g.
//                      GEGLU-style) activation between them — confirm in the
//                      compute functions that consume these globals
// Suffixes: weight:qs = i8 quantized weights, bias:qs = i32 quantized bias,
// bias:d = f32 bias dequant scales, q_input:scale = per-tensor f32 input
// quantization scale, premul_input = f16 input pre-multiplier (presumed).
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.0.ff.net.2.bias:d"> : tensor<1280xf32>
// down_blocks.2.attentions.1.transformer_blocks.1 — identical parameter
// layout to transformer_blocks.0 of this attention stage: f16 norms,
// quantized fused-qkv self-attention (attn1), quantized cross-attention
// (attn2, k/v projected from a 2048-dim input), and the quantized
// feed-forward pair (net.0.proj 1280 -> 10240, net.2 5120 -> 1280).
// See the suffix conventions noted at the sibling declarations
// (:qs quantized values, :d f32 dequant scales, q_input:scale input
// quantization scale, premul_input presumed f16 input pre-multiplier).
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.1.ff.net.2.bias:d"> : tensor<1280xf32>
// down_blocks.2.attentions.1.transformer_blocks.2 — start of the third
// transformer block of this attention stage; the remaining parameters of
// this block follow after this run of declarations. Layout mirrors the
// earlier blocks: f16 norms, quantized fused-qkv self-attention (attn1),
// and quantized cross-attention (attn2) whose k/v weights project a
// 2048-dim input (1280x2048 i8). Suffixes: :qs quantized values (i8
// weights / i32 bias), :d f32 dequant scales, q_input:scale per-tensor
// f32 input quantization scale, premul_input presumed f16 input
// pre-multiplier — confirm against the exporting tool.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.2.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- down_blocks.2.attentions.1.transformer_blocks.3 ----
// Same layout as the other transformer blocks: norm1 -> attn1 (self,
// fused QKV) -> norm2 -> attn2 (cross, Q + fused KV over the 2048-wide
// context) -> norm3 -> GEGLU feed-forward. Quantized linears expand to
// premul_input (f16) + q_input:scale (f32) + weight:qs (i8) +
// bias:qs (i32) / bias:d (f32).
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.norm1.bias"> : tensor<1280xf16>
// attn1: fused self-attention QKV (shared activation scale, per-head
// i8 weights for q/k/v)
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention (K/V consume the 2048-wide encoder context)
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.norm3.bias"> : tensor<1280xf16>
// feed-forward: GEGLU up 1280 -> 10240, down 5120 -> 1280
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.3.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- down_blocks.2.attentions.1.transformer_blocks.4 ----
// Identical structure to the sibling transformer blocks; only the block
// index in the parameter names changes. Quantized linears expand to
// premul_input (f16) + q_input:scale (f32) + weight:qs (i8) +
// bias:qs (i32) / bias:d (f32).
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.norm1.bias"> : tensor<1280xf16>
// attn1: fused self-attention QKV
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention (K/V consume the 2048-wide encoder context)
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.norm3.bias"> : tensor<1280xf16>
// feed-forward: GEGLU up 1280 -> 10240, down 5120 -> 1280
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.4.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- down_blocks.2.attentions.1.transformer_blocks.5 (continues below) ----
// Start of the next transformer block's parameter set; same repeating
// layout (norm1 -> fused-QKV self-attention -> norm2 -> cross-attention).
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.norm1.bias"> : tensor<1280xf16>
// attn1: fused self-attention QKV (shared activation quant scale)
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.5.ff.net.2.bias:d"> : tensor<1280xf32>
// --- transformer_blocks.6: full parameter set for one transformer block of
// down_blocks.2.attentions.1 (norm1 / attn1 / norm2 / attn2 / norm3 / ff).
// Generated table: identifiers mirror checkpoint keys — do not edit names.
// ":qs" = quantized values (i8/i32), ":d" = f32 dequant scales,
// "q_input:scale" = f32 input quantization scale (presumed — TODO confirm).
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.norm1.bias"> : tensor<1280xf16>
// attn1 (self-attention): fused int8 QKV projection.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.norm2.bias"> : tensor<1280xf16>
// attn2 (cross-attention): Q from 1280-d hidden state, fused K/V from 2048-d context.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, then 5120 -> 1280 contraction.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.6.ff.net.2.bias:d"> : tensor<1280xf32>
// --- transformer_blocks.7: full parameter set, identical layout to block 6
// (norm1 / attn1 self-attention / norm2 / attn2 cross-attention / norm3 / ff).
// Generated table: identifiers mirror checkpoint keys — do not edit names.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
// attn1 (self-attention): fused int8 QKV projection.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
// attn2 (cross-attention): Q from 1280-d hidden state, fused K/V from 2048-d context.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, then 5120 -> 1280 contraction.
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.7.ff.net.2.bias:d"> : tensor<1280xf32>
  // ===== down_blocks.2.attentions.1.transformer_blocks.8 =====
  // One full transformer block: norm1 + attn1 (self-attn, fused qkv, 1280-dim),
  // norm2 + attn2 (cross-attn, fused kv over a 2048-dim context), norm3 + GEGLU FF.
  // Linear weights are stored as i8 with i32 biases plus f32 dequant scales
  // (":qs"/":d" pairs — presumed quantization layout, see exporter).
  // norm1 (layer norm before self-attention), f16 affine params.
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
  // attn1: self-attention with fused q/k/v input quantization (shared input scale).
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
  // attn1 output projection.
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn1.to_out.0.bias:d"> : tensor<1280xf32>
  // norm2 (layer norm before cross-attention).
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
  // attn2: cross-attention; q from the 1280-dim hidden state, fused k/v from a
  // 2048-dim context (presumably the text-encoder embedding — confirm).
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.q_input:scale"> : tensor<f32>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
  // attn2 output projection.
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.attn2.to_out.0.bias:d"> : tensor<1280xf32>
  // norm3 (layer norm before the feed-forward).
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
  // ff: net.0.proj expands 1280 -> 10240, net.2 contracts 5120 -> 1280
  // (the halved inner dim is consistent with a gated GEGLU activation).
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.0.proj.bias:d"> : tensor<10240xf32>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.8.ff.net.2.bias:d"> : tensor<1280xf32>
  // ===== down_blocks.2.attentions.1.transformer_blocks.9 =====
  // Same layout as transformer_blocks.8: norm1 + fused-qkv self-attn,
  // norm2 + cross-attn (2048-dim fused kv context), norm3 + GEGLU FF;
  // i8 quantized weights with i32 biases and f32 ":d" dequant scales.
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm1.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm1.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
  // attn1: self-attention, fused q/k/v with one shared input scale.
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
  // attn1 output projection.
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn1.to_out.0.bias:d"> : tensor<1280xf32>
  // norm2 (before cross-attention).
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm2.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm2.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
  // attn2: cross-attention; fused k/v project from the 2048-dim context.
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.q_input:scale"> : tensor<f32>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
  // attn2 output projection.
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.attn2.to_out.0.bias:d"> : tensor<1280xf32>
  // norm3 (before the feed-forward).
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm3.weight = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.norm3.weight"> : tensor<1280xf16>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.norm3.bias = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.norm3.bias"> : tensor<1280xf16>
  // ff: 1280 -> 10240 expand (GEGLU halves to 5120), then 5120 -> 1280 contract.
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.0.proj.bias:d"> : tensor<10240xf32>
  util.global private @__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.transformer_blocks.9.ff.net.2.bias:d"> : tensor<1280xf32>
  // --- down_blocks.2.attentions.1.proj_out: quantized 1280 -> 1280 output
  // projection closing this attention (spatial transformer) stage.
  util.global private @__auto.down_blocks.2.attentions.1.proj_out.premul_input = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_out.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.down_blocks.2.attentions.1.proj_out.q_input:scale" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_out.q_input:scale"> : tensor<f32>
  util.global private @"__auto.down_blocks.2.attentions.1.proj_out.weight:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_out.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.down_blocks.2.attentions.1.proj_out.bias:qs" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_out.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.down_blocks.2.attentions.1.proj_out.bias:d" = #stream.parameter.named<"model"::"down_blocks.2.attentions.1.proj_out.bias:d"> : tensor<1280xf32>
  // ===== mid_block.resnets.0 =====
  // ResNet block: norm1 + quantized 3x3 conv1, f16 time-embedding projection,
  // norm2 + quantized 3x3 conv2. Convs carry both a forward input scale and an
  // ":rscale" (presumably the reciprocal input scale — confirm with exporter),
  // plus per-output-channel f32 weight scales (1280x1x1x1 "weight:d").
  util.global private @__auto.mid_block.resnets.0.norm1.weight = #stream.parameter.named<"model"::"mid_block.resnets.0.norm1.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.resnets.0.norm1.bias = #stream.parameter.named<"model"::"mid_block.resnets.0.norm1.bias"> : tensor<1280xf16>
  // conv1: i8 1280x1280x3x3 kernel with i32 bias and f32 dequant scales.
  util.global private @__auto.mid_block.resnets.0.conv1.premul_input = #stream.parameter.named<"model"::"mid_block.resnets.0.conv1.premul_input"> : tensor<1x1280x1x1xf16>
  util.global private @"__auto.mid_block.resnets.0.conv1.q_input:scale" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv1.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.resnets.0.conv1.q_input:rscale" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv1.q_input:rscale"> : tensor<f32>
  util.global private @"__auto.mid_block.resnets.0.conv1.weight:qs" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv1.weight:qs"> : tensor<1280x1280x3x3xi8>
  util.global private @"__auto.mid_block.resnets.0.conv1.bias:qs" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv1.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.resnets.0.conv1.bias:d" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv1.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.resnets.0.conv1.weight:d" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv1.weight:d"> : tensor<1280x1x1x1xf32>
  // time_emb_proj: unquantized f16 linear injecting the timestep embedding.
  util.global private @__auto.mid_block.resnets.0.time_emb_proj.premul_input = #stream.parameter.named<"model"::"mid_block.resnets.0.time_emb_proj.premul_input"> : tensor<1x1280xf16>
  util.global private @__auto.mid_block.resnets.0.time_emb_proj.weight = #stream.parameter.named<"model"::"mid_block.resnets.0.time_emb_proj.weight"> : tensor<1280x1280xf16>
  util.global private @__auto.mid_block.resnets.0.time_emb_proj.bias = #stream.parameter.named<"model"::"mid_block.resnets.0.time_emb_proj.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.resnets.0.norm2.weight = #stream.parameter.named<"model"::"mid_block.resnets.0.norm2.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.resnets.0.norm2.bias = #stream.parameter.named<"model"::"mid_block.resnets.0.norm2.bias"> : tensor<1280xf16>
  // conv2: same quantization layout as conv1.
  util.global private @__auto.mid_block.resnets.0.conv2.premul_input = #stream.parameter.named<"model"::"mid_block.resnets.0.conv2.premul_input"> : tensor<1x1280x1x1xf16>
  util.global private @"__auto.mid_block.resnets.0.conv2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv2.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.resnets.0.conv2.q_input:rscale" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv2.q_input:rscale"> : tensor<f32>
  util.global private @"__auto.mid_block.resnets.0.conv2.weight:qs" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv2.weight:qs"> : tensor<1280x1280x3x3xi8>
  util.global private @"__auto.mid_block.resnets.0.conv2.bias:qs" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv2.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.resnets.0.conv2.bias:d" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv2.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.resnets.0.conv2.weight:d" = #stream.parameter.named<"model"::"mid_block.resnets.0.conv2.weight:d"> : tensor<1280x1x1x1xf32>
  // --- mid_block.attentions.0: entry norm + quantized 1280 -> 1280 proj_in
  // that lifts the spatial features into the transformer-block stack.
  util.global private @__auto.mid_block.attentions.0.norm.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.norm.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.norm.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.norm.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.proj_in.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_in.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.proj_in.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_in.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.proj_in.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_in.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.proj_in.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_in.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.proj_in.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_in.bias:d"> : tensor<1280xf32>
  // ===== mid_block.attentions.0.transformer_blocks.0 =====
  // Same per-block layout as the down_blocks transformer blocks above; the
  // attn2 fused-kv group continues beyond this chunk of the file.
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
  // attn1: fused-qkv self-attention (i8 weights, i32 biases, f32 ":d" scales).
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
  // attn1 output projection.
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<1280xf32>
  // norm2 (before cross-attention).
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
  // attn2: cross-attention over the 2048-dim context (group continues below).
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.0.ff.net.2.bias:d"> : tensor<1280xf32>
  // --- mid_block.attentions.0.transformer_blocks.1: quantized parameter globals ---
  // Same layout as transformer_blocks.0: norm1 -> attn1 (fused self-attn QKV,
  // 1280-wide) -> norm2 -> attn2 (cross-attn; fused K/V over a 2048-wide input)
  // -> norm3 -> feed-forward (1280 -> 10240 proj, 5120 -> 1280 net.2).
  // Suffixes :qs / :d / q_input:scale presumably carry int8 weight values,
  // dequant scales, and per-tensor input scales respectively — inferred from
  // naming; TODO confirm against the exporter.
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<10240xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.1.ff.net.2.bias:d"> : tensor<1280xf32>
  // --- mid_block.attentions.0.transformer_blocks.2: quantized parameter globals ---
  // Same layout as the preceding transformer blocks: norm1 -> attn1 (fused
  // self-attn QKV, 1280-wide) -> norm2 -> attn2 (cross-attn; fused K/V over a
  // 2048-wide input) -> norm3 -> feed-forward (1280 -> 10240 proj, 5120 -> 1280
  // net.2). Suffixes :qs / :d / q_input:scale presumably carry int8 weight
  // values, dequant scales, and per-tensor input scales — inferred from naming;
  // TODO confirm against the exporter.
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.norm1.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.norm2.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.norm2.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_q.q_input:scale"> : tensor<f32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.norm3.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.norm3.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:d"> : tensor<10240xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.2.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.2.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.2.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.2.ff.net.2.bias:d"> : tensor<1280xf32>
  // --- mid_block.attentions.0.transformer_blocks.3: quantized parameter globals ---
  // Same per-block layout as transformer_blocks.0-2 (declarations for this block
  // continue beyond this chunk). Suffixes :qs / :d / q_input:scale presumably
  // carry int8 weight values, dequant scales, and per-tensor input scales —
  // inferred from naming; TODO confirm against the exporter.
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.norm1.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.norm1.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:d"> : tensor<1280xf32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.norm2.weight"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.norm2.bias"> : tensor<1280xf16>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_q.q_input:scale"> : tensor<f32>
  util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.q_input:scale"> : tensor<f32>
  util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.3.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.3.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- mid_block.attentions.0.transformer_blocks.4: quantized parameter globals ----
// Same layout as block 3: norm1 + self-attn (attn1, fused to_qkv), norm2 + cross-attn
// (attn2, 2048-wide kv), norm3 + feed-forward. Suffix semantics (":qs"/":d"/scale)
// are inferred from naming — see note at block 3.
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.4.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.4.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- mid_block.attentions.0.transformer_blocks.5: quantized parameter globals ----
// Same layout as block 3: norm1 + self-attn (attn1, fused to_qkv), norm2 + cross-attn
// (attn2, 2048-wide kv), norm3 + feed-forward. Suffix semantics (":qs"/":d"/scale)
// are inferred from naming — see note at block 3.
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.5.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.5.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- mid_block.attentions.0.transformer_blocks.6: quantized parameter globals ----
// Same layout as block 3; this group continues past the end of this chunk.
// Suffix semantics (":qs"/":d"/scale) are inferred from naming — see note at block 3.
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.6.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.6.ff.net.2.bias:d"> : tensor<1280xf32>
// --- mid_block.attentions.0.transformer_blocks.7 ---
// Same per-block layout as the other transformer blocks in this file:
// norm1 -> attn1 (fused self-attention QKV) -> norm2 -> attn2 (cross-attention,
// 2048-dim encoder K/V) -> norm3 -> ff. Suffixes: ":qs" quantized values (i8/i32),
// ":d" f32 dequant factors, "q_input:scale" f32 activation scale (inferred — confirm with exporter).
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
// attn1: fused self-attention Q/K/V projections.
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention (fused K/V over 2048-dim encoder input).
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
// ff: feed-forward (1280 -> 10240 projection, then 5120 -> 1280).
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.7.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.7.ff.net.2.bias:d"> : tensor<1280xf32>
// --- mid_block.attentions.0.transformer_blocks.8 ---
// Same per-block layout as the other transformer blocks in this file:
// norm1 -> attn1 (fused self-attention QKV) -> norm2 -> attn2 (cross-attention,
// 2048-dim encoder K/V) -> norm3 -> ff. Suffixes: ":qs" quantized values (i8/i32),
// ":d" f32 dequant factors, "q_input:scale" f32 activation scale (inferred — confirm with exporter).
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
// attn1: fused self-attention Q/K/V projections.
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention (fused K/V over 2048-dim encoder input).
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
// ff: feed-forward (1280 -> 10240 projection, then 5120 -> 1280).
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.8.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.8.ff.net.2.bias:d"> : tensor<1280xf32>
// mid_block.attentions.0.transformer_blocks.9 — complete parameter set for one
// transformer block: norm1 + attn1 (self-attention, fused to_qkv with 1280-dim
// K/V), norm2 + attn2 (cross-attention, fused to_kv reading a 2048-dim context),
// norm3 + ff (1280 -> 10240 proj, 5120 -> 1280 out). All linear layers follow
// the qs/scale/d quantized layout described for ff.net.2 above; norms stay f16.

// norm1 (layer norm preceding self-attention)
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.norm1.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.norm1.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
// attn1: self-attention with fused Q/K/V projections sharing one input scale
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 (layer norm preceding cross-attention)
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.norm2.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.norm2.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention — Q from the 1280-dim hidden state, fused K/V from a
// 2048-dim input (see the 1280x2048 weight shapes below)
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3 (layer norm preceding feed-forward)
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.norm3.weight = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.norm3.bias = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.norm3.bias"> : tensor<1280xf16>
// feed-forward: 1280 -> 10240 projection (ff.net.0.proj), then 5120 -> 1280
// output (ff.net.2); the halved width suggests a gated activation — confirm
// against the model definition
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.transformer_blocks.9.ff.net.2.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.transformer_blocks.9.ff.net.2.bias:d"> : tensor<1280xf32>
// mid_block.attentions.0.proj_out — final 1280x1280 projection closing the
// attention stack, same quantized-linear parameter layout as the blocks above.
util.global private @__auto.mid_block.attentions.0.proj_out.premul_input = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_out.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.mid_block.attentions.0.proj_out.q_input:scale" = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.attentions.0.proj_out.weight:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_out.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.mid_block.attentions.0.proj_out.bias:qs" = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_out.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.attentions.0.proj_out.bias:d" = #stream.parameter.named<"model"::"mid_block.attentions.0.proj_out.bias:d"> : tensor<1280xf32>
// mid_block.resnets.1 — resnet block parameters. Convolutions add two fields to
// the quantized layout: `q_input:rscale` (f32 reciprocal/inverse input scale —
// presumed; confirm against the exporter) and `weight:d` (per-output-channel
// f32 weight dequant values, shape Cx1x1x1). time_emb_proj stays in plain f16.
// norm1 + 3x3 conv1 (1280 -> 1280)
util.global private @__auto.mid_block.resnets.1.norm1.weight = #stream.parameter.named<"model"::"mid_block.resnets.1.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.resnets.1.norm1.bias = #stream.parameter.named<"model"::"mid_block.resnets.1.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.resnets.1.conv1.premul_input = #stream.parameter.named<"model"::"mid_block.resnets.1.conv1.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.mid_block.resnets.1.conv1.q_input:scale" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.resnets.1.conv1.q_input:rscale" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.mid_block.resnets.1.conv1.weight:qs" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv1.weight:qs"> : tensor<1280x1280x3x3xi8>
util.global private @"__auto.mid_block.resnets.1.conv1.bias:qs" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv1.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.resnets.1.conv1.bias:d" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv1.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.resnets.1.conv1.weight:d" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv1.weight:d"> : tensor<1280x1x1x1xf32>
// time-embedding projection (unquantized f16 linear, 1280 -> 1280)
util.global private @__auto.mid_block.resnets.1.time_emb_proj.premul_input = #stream.parameter.named<"model"::"mid_block.resnets.1.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.mid_block.resnets.1.time_emb_proj.weight = #stream.parameter.named<"model"::"mid_block.resnets.1.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @__auto.mid_block.resnets.1.time_emb_proj.bias = #stream.parameter.named<"model"::"mid_block.resnets.1.time_emb_proj.bias"> : tensor<1280xf16>
// norm2 + 3x3 conv2 (1280 -> 1280)
util.global private @__auto.mid_block.resnets.1.norm2.weight = #stream.parameter.named<"model"::"mid_block.resnets.1.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.mid_block.resnets.1.norm2.bias = #stream.parameter.named<"model"::"mid_block.resnets.1.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.mid_block.resnets.1.conv2.premul_input = #stream.parameter.named<"model"::"mid_block.resnets.1.conv2.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.mid_block.resnets.1.conv2.q_input:scale" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.mid_block.resnets.1.conv2.q_input:rscale" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.mid_block.resnets.1.conv2.weight:qs" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv2.weight:qs"> : tensor<1280x1280x3x3xi8>
util.global private @"__auto.mid_block.resnets.1.conv2.bias:qs" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.mid_block.resnets.1.conv2.bias:d" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv2.bias:d"> : tensor<1280xf32>
util.global private @"__auto.mid_block.resnets.1.conv2.weight:d" = #stream.parameter.named<"model"::"mid_block.resnets.1.conv2.weight:d"> : tensor<1280x1x1x1xf32>
// up_blocks.0.resnets.0 — first decoder resnet. Input is 2560 channels (norm1
// and conv1 input width is double the 1280 working width, consistent with a
// skip-connection concatenation — confirm against the model definition), so a
// 1x1 conv_shortcut (2560 -> 1280) is present to match the residual path.
// norm1 + 3x3 conv1 (2560 -> 1280)
util.global private @__auto.up_blocks.0.resnets.0.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.norm1.weight"> : tensor<2560xf16>
util.global private @__auto.up_blocks.0.resnets.0.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.norm1.bias"> : tensor<2560xf16>
util.global private @__auto.up_blocks.0.resnets.0.conv1.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv1.premul_input"> : tensor<1x2560x1x1xf16>
util.global private @"__auto.up_blocks.0.resnets.0.conv1.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.0.conv1.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.0.conv1.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv1.weight:qs"> : tensor<1280x2560x3x3xi8>
util.global private @"__auto.up_blocks.0.resnets.0.conv1.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv1.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.resnets.0.conv1.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv1.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.resnets.0.conv1.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv1.weight:d"> : tensor<1280x1x1x1xf32>
// time-embedding projection (unquantized f16 linear, 1280 -> 1280)
util.global private @__auto.up_blocks.0.resnets.0.time_emb_proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.up_blocks.0.resnets.0.time_emb_proj.weight = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @__auto.up_blocks.0.resnets.0.time_emb_proj.bias = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.time_emb_proj.bias"> : tensor<1280xf16>
// norm2 + 3x3 conv2 (1280 -> 1280)
util.global private @__auto.up_blocks.0.resnets.0.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.resnets.0.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.resnets.0.conv2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv2.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.up_blocks.0.resnets.0.conv2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.0.conv2.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.0.conv2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv2.weight:qs"> : tensor<1280x1280x3x3xi8>
util.global private @"__auto.up_blocks.0.resnets.0.conv2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.resnets.0.conv2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv2.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.resnets.0.conv2.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv2.weight:d"> : tensor<1280x1x1x1xf32>
// 1x1 conv_shortcut (2560 -> 1280) aligning the residual path's channel count
util.global private @__auto.up_blocks.0.resnets.0.conv_shortcut.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv_shortcut.premul_input"> : tensor<1x2560x1x1xf16>
util.global private @"__auto.up_blocks.0.resnets.0.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.0.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.0.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv_shortcut.weight:qs"> : tensor<1280x2560x1x1xi8>
util.global private @"__auto.up_blocks.0.resnets.0.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv_shortcut.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.resnets.0.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv_shortcut.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.resnets.0.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.0.conv_shortcut.weight:d"> : tensor<1280x1x1x1xf32>
// up_blocks.0.attentions.0 — entry of the decoder's first attention stack:
// f16 norm followed by a quantized 1280x1280 proj_in linear.
util.global private @__auto.up_blocks.0.attentions.0.norm.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.norm.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.norm.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.norm.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.proj_in.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_in.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.proj_in.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.proj_in.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_in.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.proj_in.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_in.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.proj_in.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_in.bias:d"> : tensor<1280xf32>
// up_blocks.0.attentions.0.transformer_blocks.0 — same block layout as the
// mid_block transformer blocks: norm1 + self-attention (fused to_qkv), norm2 +
// cross-attention (fused to_kv over a 2048-dim context), norm3 + feed-forward.
// Linear layers use the qs/scale/d quantized parameter layout.

// norm1 + attn1 (self-attention)
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 + attn2 (cross-attention; K/V projected from a 2048-dim input)
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3 + feed-forward (1280 -> 10240 proj, 5120 -> 1280 out)
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.0.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.0.transformer_blocks.1: all quantized parameters ---
// Model width is 1280 (f16 norms; i8 weights with i32 biases and f32 ":d" dequant data).
// Suffix convention: ":qs" quantized storage, ":d" f32 dequant data,
// "q_input:scale" scalar input scale, "premul_input" f16 pre-multiplier
// (presumably folded SmoothQuant-style scaling -- TODO confirm with exporter).
// norm1 (f16 affine params).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
// attn1: fused to_qkv -- one shared input scale/premul, separate 1280x1280 q/k/v weights.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection (1280 -> 1280).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
// attn2: q from the 1280-d stream; fused kv over a 2048-d input
// (presumably cross-attention on encoder states -- TODO confirm).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
// ff.net.0.proj: 1280 -> 10240 up-projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<10240xf32>
// ff.net.2: 5120 -> 1280 down-projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.1.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.0.transformer_blocks.2: all quantized parameters ---
// Same layout as the sibling transformer blocks: f16 norms, i8 weights,
// i32 biases with f32 ":d" dequant data, scalar f32 "q_input:scale",
// f16 "premul_input" pre-multipliers (suffix semantics inferred -- TODO confirm).
// norm1.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.norm1.bias"> : tensor<1280xf16>
// attn1: fused to_qkv, shared input scale, separate 1280x1280 q/k/v weights.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.norm2.bias"> : tensor<1280xf16>
// attn2: q over 1280-d stream; fused kv over a 2048-d input
// (presumably cross-attention on encoder states -- TODO confirm).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.norm3.bias"> : tensor<1280xf16>
// ff.net.0.proj: 1280 -> 10240 up-projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.0.proj.bias:d"> : tensor<10240xf32>
// ff.net.2: 5120 -> 1280 down-projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.2.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.0.transformer_blocks.3: all quantized parameters ---
// Same layout as the sibling transformer blocks: f16 norms, i8 weights,
// i32 biases with f32 ":d" dequant data, scalar f32 "q_input:scale",
// f16 "premul_input" pre-multipliers (suffix semantics inferred -- TODO confirm).
// norm1.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.norm1.bias"> : tensor<1280xf16>
// attn1: fused to_qkv, shared input scale, separate 1280x1280 q/k/v weights.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.norm2.bias"> : tensor<1280xf16>
// attn2: q over 1280-d stream; fused kv over a 2048-d input
// (presumably cross-attention on encoder states -- TODO confirm).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.norm3.bias"> : tensor<1280xf16>
// ff.net.0.proj: 1280 -> 10240 up-projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.0.proj.bias:d"> : tensor<10240xf32>
// ff.net.2: 5120 -> 1280 down-projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.3.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.0.transformer_blocks.4: full parameter set ---
// Layout: norm1 -> attn1 (self-attn, fused QKV) -> norm2 -> attn2 (cross-attn,
// fused KV over a 2048-wide context) -> norm3 -> feed-forward.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.norm1.bias"> : tensor<1280xf16>
// attn1: self-attention. Q/K/V share one premul input and one input scale
// (fused "to_qkv"); each projection keeps its own i8 weights and i32/f32 bias pair.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention. Q is fed from the 1280-wide hidden state; K/V are
// fused ("to_kv") over a separate 1x1x2048 encoder-context input.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, then 5120 -> 1280 contraction.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.4.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.0.transformer_blocks.5: full parameter set ---
// Same structure as the sibling transformer blocks in this attention stack:
// norm1 -> self-attn (fused QKV) -> norm2 -> cross-attn (fused KV, 2048-wide
// context) -> norm3 -> feed-forward. Quantization layout per suffix:
// i8 `:qs` weights, i32 `:qs` + f32 `:d` bias pairs, scalar f32 input scales.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.norm1.bias"> : tensor<1280xf16>
// attn1: self-attention (fused Q/K/V input quantization).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention over a 1x1x2048 context (fused K/V input quantization).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, then 5120 -> 1280 contraction.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.5.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.0.transformer_blocks.6 (continues below this
// chunk: remaining ff.net.2 bias declarations follow) ---
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.norm1.bias"> : tensor<1280xf16>
// attn1: self-attention (fused Q/K/V input quantization).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention over a 1x1x2048 context (fused K/V input quantization).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection; 5120 -> 1280 contraction (ff.net.2
// bias declarations continue past this chunk).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.6.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.0.transformer_blocks.7: complete parameter set ---
// Layout per block: norm1 -> attn1 (self-attention, fused QKV: all three
// projections share one q_input:scale) -> norm2 -> attn2 (cross-attention,
// separate Q vs. fused KV over a 2048-wide context) -> norm3 -> feed-forward.
// Naming suggests static quantization: ":qs" = i8 weights / i32 biases,
// ":d" = paired f32 bias term (presumably dequantization — TODO confirm),
// "q_input:scale" = scalar f32 input scale, "premul_input" = f16
// pre-multiplier sized to the layer width (confirm semantics at use sites).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V projections, each 1280 -> 1280.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
// attn2: Q from the 1280-wide hidden state; fused K/V from a 2048-wide
// cross-attention context.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
// Feed-forward: proj 1280 -> 10240, net.2 5120 -> 1280 (halved width between
// stages suggests a gated activation — confirm at the use site).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.7.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.0.transformer_blocks.8: complete parameter set ---
// Same structure as the sibling transformer blocks in this attentions stack:
// norm1 -> attn1 (self-attn, fused QKV) -> norm2 -> attn2 (cross-attn, fused
// KV over a 2048-wide context) -> norm3 -> feed-forward. Suffixes: ":qs" =
// i8/i32 quantized storage, ":d" = paired f32 bias term (presumably
// dequantization — TODO confirm), "q_input:scale" = scalar f32 input scale,
// "premul_input" = f16 pre-multiplier sized to the layer width.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V projections, each 1280 -> 1280, sharing one input scale.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
// attn2: Q from the 1280-wide hidden state; fused K/V from a 2048-wide
// cross-attention context.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
// Feed-forward: proj 1280 -> 10240, net.2 5120 -> 1280 (halved width between
// stages suggests a gated activation — confirm at the use site).
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.8.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.0.transformer_blocks.9 (through norm3) ---
// Same structure as the sibling transformer blocks: norm1 -> attn1 (self-attn,
// fused QKV) -> norm2 -> attn2 (cross-attn, fused KV over a 2048-wide context)
// -> norm3. The block's feed-forward parameters continue after this span.
// Suffixes: ":qs" = i8/i32 quantized storage, ":d" = paired f32 bias term
// (presumably dequantization — TODO confirm), "q_input:scale" = scalar f32
// input scale, "premul_input" = f16 pre-multiplier sized to the layer width.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V projections, each 1280 -> 1280, sharing one input scale.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
// attn2: Q from the 1280-wide hidden state; fused K/V from a 2048-wide
// cross-attention context.
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.transformer_blocks.9.ff.net.2.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.0.proj_out.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_out.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.0.proj_out.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.0.proj_out.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_out.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.0.proj_out.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_out.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.0.proj_out.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.0.proj_out.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.resnets.1: ResNet block (2560 -> 1280 channels) ---
// norm1 spans 2560 channels, matching the concatenated skip + upsampled input.
util.global private @__auto.up_blocks.0.resnets.1.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.norm1.weight"> : tensor<2560xf16>
util.global private @__auto.up_blocks.0.resnets.1.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.norm1.bias"> : tensor<2560xf16>
// conv1: quantized 3x3 conv, 2560 -> 1280. Carries both "scale" and "rscale"
// scalars (presumably forward/reciprocal input scales — TODO confirm) and a
// per-output-channel f32 weight dequant tensor (1280x1x1x1).
util.global private @__auto.up_blocks.0.resnets.1.conv1.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv1.premul_input"> : tensor<1x2560x1x1xf16>
util.global private @"__auto.up_blocks.0.resnets.1.conv1.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.1.conv1.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.1.conv1.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv1.weight:qs"> : tensor<1280x2560x3x3xi8>
util.global private @"__auto.up_blocks.0.resnets.1.conv1.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv1.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.resnets.1.conv1.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv1.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.resnets.1.conv1.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv1.weight:d"> : tensor<1280x1x1x1xf32>
// Time-embedding projection: plain f16 linear (1280 -> 1280), not quantized.
util.global private @__auto.up_blocks.0.resnets.1.time_emb_proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.up_blocks.0.resnets.1.time_emb_proj.weight = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @__auto.up_blocks.0.resnets.1.time_emb_proj.bias = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.time_emb_proj.bias"> : tensor<1280xf16>
// norm2 + conv2: quantized 3x3 conv, 1280 -> 1280, same scale layout as conv1.
util.global private @__auto.up_blocks.0.resnets.1.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.resnets.1.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.resnets.1.conv2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv2.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.up_blocks.0.resnets.1.conv2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.1.conv2.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.1.conv2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv2.weight:qs"> : tensor<1280x1280x3x3xi8>
util.global private @"__auto.up_blocks.0.resnets.1.conv2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.resnets.1.conv2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv2.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.resnets.1.conv2.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv2.weight:d"> : tensor<1280x1x1x1xf32>
// conv_shortcut: quantized 1x1 conv matching the 2560 -> 1280 channel change
// on the residual path.
util.global private @__auto.up_blocks.0.resnets.1.conv_shortcut.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv_shortcut.premul_input"> : tensor<1x2560x1x1xf16>
util.global private @"__auto.up_blocks.0.resnets.1.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.1.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.1.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv_shortcut.weight:qs"> : tensor<1280x2560x1x1xi8>
util.global private @"__auto.up_blocks.0.resnets.1.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv_shortcut.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.resnets.1.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv_shortcut.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.resnets.1.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.1.conv_shortcut.weight:d"> : tensor<1280x1x1x1xf32>
// --- up_blocks.0.attentions.1: spatial transformer entry ---
// Input GroupNorm followed by the quantized input projection (1280 -> 1280).
util.global private @__auto.up_blocks.0.attentions.1.norm.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.norm.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.norm.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.norm.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.proj_in.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_in.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.proj_in.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.proj_in.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_in.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.proj_in.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_in.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.proj_in.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_in.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.1, transformer block 0 ---
// norm1 -> self-attention (attn1) with a fused Q/K/V projection sharing one
// input scale; all weights are i8 with i32 quantized bias + f32 dequant bias.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// Self-attention output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 -> cross-attention (attn2): query from the 1280-dim hidden state,
// key/value from the 2048-dim conditioning context.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// Cross-attention output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3 -> feed-forward (1280 -> 10240 proj, 5120 -> 1280 output).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.0.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.1, transformer block 1 ---
// Identical parameter layout to transformer block 0 of this attention stage:
// norm1 -> fused-QKV self-attention (shared input scale, i8 weights).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// Self-attention output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 -> cross-attention over the 2048-dim context.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// Cross-attention output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3 -> feed-forward (1280 -> 10240 proj, 5120 -> 1280 output).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.1.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.1.transformer_blocks.2: full parameter set.
// Layout per block: norm1 -> attn1 (self-attn, fused to_qkv) -> norm2 ->
// attn2 (cross-attn: to_q on the 1280-dim hidden state, fused to_kv on the
// 2048-dim encoder state) -> norm3 -> ff. Quantized layers carry i8 weights,
// i32 bias:qs + f32 bias:d, and a scalar f32 per-tensor q_input:scale;
// norms stay f16. NOTE(review): suffix semantics inferred from types —
// confirm against the exporter.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.norm1.bias"> : tensor<1280xf16>
// attn1: self-attention; q/k/v share one premul_input and one input scale
// (fused "to_qkv" naming), each projection 1280 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection: 1280 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention; to_q quantizes the 1280-dim hidden state, to_kv
// quantizes the 2048-dim encoder input (separate premul/scale pairs).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection: 1280 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.norm3.bias"> : tensor<1280xf16>
// ff: 1280 -> 10240 expansion, then 5120 -> 1280 contraction.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.2.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.1.transformer_blocks.3: full parameter set.
// Same structure as the sibling transformer blocks: norm1 -> attn1
// (self-attn, fused to_qkv over the 1280-dim hidden state) -> norm2 ->
// attn2 (cross-attn against a 2048-dim encoder state) -> norm3 -> ff.
// Quantized layers: i8 weight:qs, i32 bias:qs + f32 bias:d, scalar f32
// q_input:scale; norms are plain f16. NOTE(review): suffix semantics
// inferred from types — confirm against the exporter.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.norm1.bias"> : tensor<1280xf16>
// attn1: self-attention; shared premul_input and input scale for q/k/v.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection: 1280 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention; to_q over the 1280-dim hidden state, fused to_kv
// over the 2048-dim encoder input.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection: 1280 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.norm3.bias"> : tensor<1280xf16>
// ff: 1280 -> 10240 expansion, then 5120 -> 1280 contraction.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.3.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.1.transformer_blocks.4: parameter set (the
// block's ff parameters continue past this point in the file). Structure
// mirrors the sibling transformer blocks: norm1 -> attn1 (self-attn, fused
// to_qkv) -> norm2 -> attn2 (cross-attn, 2048-dim encoder input) -> norm3
// -> ff. NOTE(review): ':qs'/':d'/':scale' suffix semantics inferred from
// the tensor types — confirm against the exporting quantization tool.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.norm1.bias"> : tensor<1280xf16>
// attn1: self-attention; shared premul_input and input scale for q/k/v.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection: 1280 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention; to_q over the 1280-dim hidden state, fused to_kv
// over the 2048-dim encoder input.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection: 1280 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.norm3.bias"> : tensor<1280xf16>
// ff: 1280 -> 10240 expansion; ff.net.2 parameters continue past this chunk.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.4.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.1.transformer_blocks.5 ----
// Same layout as the other transformer blocks in this attention stage:
// norm1 -> attn1 (fused QKV, 1280x1280 per head-projection) -> norm2 ->
// attn2 (separate Q 1280x1280; fused K/V 1280x2048) -> norm3 -> ff.
// Suffixes: :qs = quantized values, :d = f32 dequant values,
// q_input:scale = scalar input scale, premul_input = f16 input pre-multiplier
// (inferred from naming — confirm against the exporting tool).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V projections sharing one input scale.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.norm2.bias"> : tensor<1280xf16>
// attn2: Q projected from the 1280-dim stream; K/V fused, from a 2048-dim
// input (separate premul/scale for each input).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, then 5120 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.5.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.1.transformer_blocks.6 ----
// Identical parameter layout to transformer_blocks.5 above; only the block
// index in the symbol/parameter names differs.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V projections sharing one input scale.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.norm2.bias"> : tensor<1280xf16>
// attn2: separate Q (1280-dim input) and fused K/V (2048-dim input).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, then 5120 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.6.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.1.transformer_blocks.7 (continues past this
// chunk: norm3/ff params follow) ----
// Same per-block parameter layout as transformer_blocks.5/.6 above.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V projections sharing one input scale.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
// attn2: separate Q (1280-dim input) and fused K/V (2048-dim input).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.7.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.1.transformer_blocks.8: full transformer block ---
// Auto-generated bindings into the "model" parameter archive. Suffixes
// (presumed quantization layout — confirm with the exporter): ":qs" i8/i32
// quantized storage, ":d" f32 dequant scales, "q_input:scale" scalar input
// scale, "premul_input" f16 pre-multiplier. Layout per block: norm1 ->
// self-attention (attn1, fused QKV) -> norm2 -> cross-attention (attn2) ->
// norm3 -> feed-forward.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
// attn1: fused Q/K/V projections, each 1280 -> 1280, sharing one input scale.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// Pre-cross-attention layer norm.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
// attn2: Q from the 1280-wide hidden state; K/V from a 2048-wide context
// (cross-attention — confirm encoder width against the exporter).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// Pre-FF layer norm.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, second layer 5120 -> 1280
// (the 2x width ratio suggests a gated activation — TODO confirm).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.8.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.1.transformer_blocks.9: full transformer block ---
// Auto-generated bindings into the "model" parameter archive; structure mirrors
// the other transformer blocks in this file: norm1 -> self-attention (fused
// QKV) -> norm2 -> cross-attention -> norm3 -> feed-forward. Suffix scheme
// (presumed quantization layout — verify against the exporter): ":qs" i8/i32
// quantized storage, ":d" f32 dequant scales, "q_input:scale" scalar input
// scale, "premul_input" f16 pre-multiplier.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
// attn1 (self-attention): fused Q/K/V, each 1280 -> 1280.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// Pre-cross-attention layer norm.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
// attn2 (cross-attention): Q 1280 -> 1280; K/V 2048 -> 1280 from the
// external context (confirm encoder width against the exporter).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// Pre-FF layer norm.
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240, then 5120 -> 1280 (gated activation implied
// by the halved width of the second layer's input — TODO confirm).
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.transformer_blocks.9.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.1.proj_out: output projection closing this
// attention stage (1280 -> 1280). Parameter-archive bindings; ":qs" = i8/i32
// quantized storage, ":d" = f32 dequant scales, "q_input:scale" = scalar
// input quantization scale (presumed scheme — verify with the exporter).
util.global private @__auto.up_blocks.0.attentions.1.proj_out.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_out.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.1.proj_out.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.1.proj_out.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_out.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.1.proj_out.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_out.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.1.proj_out.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.1.proj_out.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.resnets.2: residual block ---
// 1920-channel input (skip-concatenated — TODO confirm against the decoder
// wiring), 1280-channel output. Quantized 3x3 convs store i8 weights with
// per-output-channel f32 scales (weight:d is 1280x1x1x1); conv inputs carry
// both "q_input:scale" and "q_input:rscale" scalars (presumably forward and
// reciprocal scales — verify). time_emb_proj remains unquantized f16.
util.global private @__auto.up_blocks.0.resnets.2.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.norm1.weight"> : tensor<1920xf16>
util.global private @__auto.up_blocks.0.resnets.2.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.norm1.bias"> : tensor<1920xf16>
// conv1: 1920 -> 1280, 3x3.
util.global private @__auto.up_blocks.0.resnets.2.conv1.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv1.premul_input"> : tensor<1x1920x1x1xf16>
util.global private @"__auto.up_blocks.0.resnets.2.conv1.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.2.conv1.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.2.conv1.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv1.weight:qs"> : tensor<1280x1920x3x3xi8>
util.global private @"__auto.up_blocks.0.resnets.2.conv1.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv1.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.resnets.2.conv1.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv1.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.resnets.2.conv1.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv1.weight:d"> : tensor<1280x1x1x1xf32>
// Timestep-embedding projection (1280 -> 1280), kept in f16.
util.global private @__auto.up_blocks.0.resnets.2.time_emb_proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.up_blocks.0.resnets.2.time_emb_proj.weight = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.time_emb_proj.weight"> : tensor<1280x1280xf16>
util.global private @__auto.up_blocks.0.resnets.2.time_emb_proj.bias = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.time_emb_proj.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.resnets.2.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.resnets.2.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.norm2.bias"> : tensor<1280xf16>
// conv2: 1280 -> 1280, 3x3.
util.global private @__auto.up_blocks.0.resnets.2.conv2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv2.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.up_blocks.0.resnets.2.conv2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.2.conv2.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.2.conv2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv2.weight:qs"> : tensor<1280x1280x3x3xi8>
util.global private @"__auto.up_blocks.0.resnets.2.conv2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.resnets.2.conv2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv2.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.resnets.2.conv2.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv2.weight:d"> : tensor<1280x1x1x1xf32>
// 1x1 shortcut conv matching the 1920-channel residual input to the
// 1280-channel output.
util.global private @__auto.up_blocks.0.resnets.2.conv_shortcut.premul_input = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv_shortcut.premul_input"> : tensor<1x1920x1x1xf16>
util.global private @"__auto.up_blocks.0.resnets.2.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.2.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.resnets.2.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv_shortcut.weight:qs"> : tensor<1280x1920x1x1xi8>
util.global private @"__auto.up_blocks.0.resnets.2.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv_shortcut.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.resnets.2.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv_shortcut.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.resnets.2.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.resnets.2.conv_shortcut.weight:d"> : tensor<1280x1x1x1xf32>
// ---- up_blocks.0.attentions.2: input group-norm and quantized proj_in (1280 -> 1280) ----
util.global private @__auto.up_blocks.0.attentions.2.norm.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.norm.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.norm.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.norm.bias"> : tensor<1280xf16>
// proj_in: i8 weights (:qs) with f32 input scale; note no weight:d global is
// emitted here, unlike the conv globals above — presumably the dequant scale is
// folded elsewhere; verify against the exporter.
util.global private @__auto.up_blocks.0.attentions.2.proj_in.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_in.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.proj_in.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.proj_in.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_in.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.proj_in.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_in.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.proj_in.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_in.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.2.transformer_blocks.0 ----
// Standard transformer block layout: norm1 + attn1 (self-attention, fused
// to_qkv 1280 -> 3x1280), norm2 + attn2 (cross-attention: to_q from the 1280-d
// hidden state, fused to_kv from a 2048-d context), norm3 + GEGLU feed-forward
// (proj 1280 -> 10240, i.e. 2x5120 gate/value; net.2 projects 5120 -> 1280).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.norm1.bias"> : tensor<1280xf16>
// attn1: fused self-attention QKV projections (shared input scale).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention — separate input scales for the query (1280-d hidden
// state) and the fused key/value projections (2048-d context).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.norm3.bias"> : tensor<1280xf16>
// Feed-forward: ff.net.0.proj expands 1280 -> 10240, ff.net.2 contracts 5120 -> 1280.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.0.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.2.transformer_blocks.1 ----
// Identical structure to transformer_blocks.0: norm1 + fused self-attention
// QKV, norm2 + cross-attention (to_q 1280-d, fused to_kv over 2048-d context),
// norm3 + feed-forward (proj 1280 -> 10240, net.2 5120 -> 1280).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.norm1.bias"> : tensor<1280xf16>
// attn1: fused self-attention QKV projections (shared input scale).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention — separate input scales for query and fused key/value.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.norm3.bias"> : tensor<1280xf16>
// Feed-forward: ff.net.0.proj expands 1280 -> 10240, ff.net.2 contracts 5120 -> 1280.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.1.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.2.transformer_blocks.2 ----
// Same layout as transformer_blocks.0/1; the feed-forward globals continue
// past this point in the file.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.norm1.bias"> : tensor<1280xf16>
// attn1: fused self-attention QKV projections (shared input scale).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.norm2.bias"> : tensor<1280xf16>
// attn2: cross-attention — separate input scales for query and fused key/value.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.norm3.bias"> : tensor<1280xf16>
// Feed-forward: ff.net.0.proj expands 1280 -> 10240 (remaining ff globals follow below).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.2.ff.net.2.bias:d"> : tensor<1280xf32>
// === up_blocks.0.attentions.2, transformer_blocks.3: full block (norm1, attn1, norm2, attn2, norm3, ff) ===
// Suffix convention (hedged — inferred from names/types, confirm with the exporter): ":qs" = quantized storage
// (i8 weight / i32 bias), ":d" = f32 values, presumably dequant scales; "q_input:scale" = scalar f32 input scale;
// "premul_input" = f16 elementwise pre-multiplier applied before the quantized matmul.
// norm1 (pre-attn1 layernorm affine, f16).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.norm1.bias"> : tensor<1280xf16>
// attn1 (self-attention): fused q/k/v sharing one premul input and one input-quantization scale; each projection is 1280x1280 i8.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 (pre-attn2 layernorm affine, f16).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.norm2.bias"> : tensor<1280xf16>
// attn2 (cross-attention): q takes the 1280-wide hidden state; fused k/v take the 2048-wide context input.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3 (pre-ff layernorm affine, f16).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.norm3.bias"> : tensor<1280xf16>
// feed-forward: 1280 -> 10240 up-projection, then 5120 -> 1280 down-projection (5120 premul suggests a gated split — confirm).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.3.ff.net.2.bias:d"> : tensor<1280xf32>
// === up_blocks.0.attentions.2, transformer_blocks.4: full block (norm1, attn1, norm2, attn2, norm3, ff) ===
// Suffix convention (hedged — inferred from names/types, confirm with the exporter): ":qs" = quantized storage
// (i8 weight / i32 bias), ":d" = f32 values, presumably dequant scales; "q_input:scale" = scalar f32 input scale;
// "premul_input" = f16 elementwise pre-multiplier applied before the quantized matmul.
// norm1 (pre-attn1 layernorm affine, f16).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.norm1.bias"> : tensor<1280xf16>
// attn1 (self-attention): fused q/k/v sharing one premul input and one input-quantization scale; each projection is 1280x1280 i8.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 (pre-attn2 layernorm affine, f16).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.norm2.bias"> : tensor<1280xf16>
// attn2 (cross-attention): q takes the 1280-wide hidden state; fused k/v take the 2048-wide context input.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3 (pre-ff layernorm affine, f16).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.norm3.bias"> : tensor<1280xf16>
// feed-forward: 1280 -> 10240 up-projection, then 5120 -> 1280 down-projection (5120 premul suggests a gated split — confirm).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.4.ff.net.2.bias:d"> : tensor<1280xf32>
// === up_blocks.0.attentions.2, transformer_blocks.5 (continues past this chunk): norm1, attn1, norm2, attn2 (partial) ===
// Suffix convention (hedged — inferred from names/types, confirm with the exporter): ":qs" = quantized storage
// (i8 weight / i32 bias), ":d" = f32 values, presumably dequant scales; "q_input:scale" = scalar f32 input scale;
// "premul_input" = f16 elementwise pre-multiplier applied before the quantized matmul.
// norm1 (pre-attn1 layernorm affine, f16).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.norm1.bias"> : tensor<1280xf16>
// attn1 (self-attention): fused q/k/v sharing one premul input and one input-quantization scale; each projection is 1280x1280 i8.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 (pre-attn2 layernorm affine, f16).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.norm2.bias"> : tensor<1280xf16>
// attn2 (cross-attention): q takes the 1280-wide hidden state; fused k/v take the 2048-wide context input.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection (declarations for this sub-layer continue past this chunk).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.5.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.2 / transformer_blocks.6 ----
// Same layout as the other transformer blocks in this attention stage:
// norm1 -> attn1 (fused self-attention qkv, 1280-wide) -> norm2 ->
// attn2 (q from hidden state, fused kv from a 2048-wide input) -> norm3 -> ff.
// ":qs" globals hold quantized values (i8/i32), ":d" the f32 dequant
// companions, "q_input:scale" the per-tensor input scale — naming inferred
// from dtypes; confirm against the exporting quantization tool.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.norm1.bias"> : tensor<1280xf16>
// attn1: fused q/k/v projections sharing one input scale/premultiplier.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 ahead of attn2.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.norm2.bias"> : tensor<1280xf16>
// attn2: separate q (1280-in) and fused kv (2048-in) projection parameters.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3 ahead of the feed-forward block.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.norm3.bias"> : tensor<1280xf16>
// Feed-forward: net.0.proj 1280 -> 10240, net.2 5120 -> 1280.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.6.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.2 / transformer_blocks.7 ----
// Same parameter layout as transformer_blocks.6: norm1 -> attn1 (fused qkv)
// -> norm2 -> attn2 (q + fused kv from a 2048-wide input) -> norm3 -> ff.
// ":qs" = quantized i8/i32 values, ":d" = f32 dequant companions,
// "q_input:scale" = per-tensor input scale (naming inferred from dtypes).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.norm1.bias"> : tensor<1280xf16>
// attn1: fused q/k/v projections sharing one input scale/premultiplier.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 ahead of attn2.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.norm2.bias"> : tensor<1280xf16>
// attn2: separate q (1280-in) and fused kv (2048-in) projection parameters.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.attn2.to_out.0.bias:d"> : tensor<1280xf32>
// norm3 ahead of the feed-forward block.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.norm3.bias"> : tensor<1280xf16>
// Feed-forward: net.0.proj 1280 -> 10240, net.2 5120 -> 1280.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.7.ff.net.2.bias:d"> : tensor<1280xf32>
// ---- up_blocks.0.attentions.2 / transformer_blocks.8 (continues past this
// chunk) ---- Same layout as the preceding transformer blocks; ":qs" =
// quantized i8/i32 values, ":d" = f32 dequant companions (naming inferred
// from dtypes — confirm against the exporting quantization tool).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.norm1.bias"> : tensor<1280xf16>
// attn1: fused q/k/v projections sharing one input scale/premultiplier.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn1.to_out.0.bias:d"> : tensor<1280xf32>
// norm2 ahead of attn2.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.norm2.bias"> : tensor<1280xf16>
// attn2 q/kv inputs and projections (group continues past this chunk).
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.norm3.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.8.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.2.transformer_blocks.9 (complete) ---
// Same layout as the other transformer blocks in this attention stage:
// norm1 -> self-attn (fused to_qkv, int8) -> norm2 -> cross-attn
// (to_q 1280-dim, fused to_kv over 2048-dim encoder states) -> norm3 -> ff.
// Suffix convention (NOTE(review): inferred from naming — confirm with the
// exporter): `:qs` quantized values, `:d` f32 dequant scales,
// `q_input:scale` input quant scale, `premul_input` f16 input premultiplier.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.norm1.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.norm1.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.norm1.bias"> : tensor<1280xf16>
// Self-attention (attn1): fused Q/K/V sharing one input scale/premultiplier.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_k.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_v.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_qkv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn1.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.norm2.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.norm2.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.norm2.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.norm2.bias"> : tensor<1280xf16>
// Cross-attention (attn2): Q from the 1280-dim hidden state, fused K/V from
// the 2048-dim encoder states.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_q.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_k.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_k.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_k.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_v.weight:qs"> : tensor<1280x2048xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_v.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_kv.to_v.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.attn2.to_out.0.bias:d"> : tensor<1280xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.norm3.weight = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.norm3.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.norm3.bias = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.norm3.bias"> : tensor<1280xf16>
// Feed-forward: 1280 -> 10240 projection, then 5120 -> 1280.
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.weight:qs"> : tensor<10240x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.bias:qs"> : tensor<10240xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.0.proj.bias:d"> : tensor<10240xf32>
util.global private @__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.premul_input"> : tensor<1x1x5120xf16>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.weight:qs"> : tensor<1280x5120xi8>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.transformer_blocks.9.ff.net.2.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.attentions.2.proj_out ---
// Output projection of the attention stage (1280 -> 1280, int8 quantized:
// `:qs` values, `:d` f32 bias dequant scales, `q_input:scale` input scale).
util.global private @__auto.up_blocks.0.attentions.2.proj_out.premul_input = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_out.premul_input"> : tensor<1x1x1280xf16>
util.global private @"__auto.up_blocks.0.attentions.2.proj_out.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.attentions.2.proj_out.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_out.weight:qs"> : tensor<1280x1280xi8>
util.global private @"__auto.up_blocks.0.attentions.2.proj_out.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_out.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.attentions.2.proj_out.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.attentions.2.proj_out.bias:d"> : tensor<1280xf32>
// --- up_blocks.0.upsamplers.0.conv ---
// Upsampler 3x3 conv (1280 -> 1280). Conv layers additionally carry a
// `q_input:rscale` (reciprocal of the input scale — presumably; confirm with
// the exporter) and a per-output-channel `weight:d` tensor<1280x1x1x1xf32>.
util.global private @__auto.up_blocks.0.upsamplers.0.conv.premul_input = #stream.parameter.named<"model"::"up_blocks.0.upsamplers.0.conv.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.up_blocks.0.upsamplers.0.conv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.0.upsamplers.0.conv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.upsamplers.0.conv.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.0.upsamplers.0.conv.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.0.upsamplers.0.conv.weight:qs" = #stream.parameter.named<"model"::"up_blocks.0.upsamplers.0.conv.weight:qs"> : tensor<1280x1280x3x3xi8>
util.global private @"__auto.up_blocks.0.upsamplers.0.conv.bias:qs" = #stream.parameter.named<"model"::"up_blocks.0.upsamplers.0.conv.bias:qs"> : tensor<1280xi32>
util.global private @"__auto.up_blocks.0.upsamplers.0.conv.bias:d" = #stream.parameter.named<"model"::"up_blocks.0.upsamplers.0.conv.bias:d"> : tensor<1280xf32>
util.global private @"__auto.up_blocks.0.upsamplers.0.conv.weight:d" = #stream.parameter.named<"model"::"up_blocks.0.upsamplers.0.conv.weight:d"> : tensor<1280x1x1x1xf32>
// --- up_blocks.1.resnets.0 ---
// ResNet block: input is 1920 channels (skip-concatenated — presumably, given
// the 1920 -> 640 reduction; confirm), output 640 channels.
// norm1/norm2 are f16 GroupNorm-style affine params; conv1/conv2 and the 1x1
// conv_shortcut are int8-quantized with per-channel `weight:d` scales; the
// time_emb_proj stays in plain f16 (no quantized variants declared).
util.global private @__auto.up_blocks.1.resnets.0.norm1.weight = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.norm1.weight"> : tensor<1920xf16>
util.global private @__auto.up_blocks.1.resnets.0.norm1.bias = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.norm1.bias"> : tensor<1920xf16>
util.global private @__auto.up_blocks.1.resnets.0.conv1.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv1.premul_input"> : tensor<1x1920x1x1xf16>
util.global private @"__auto.up_blocks.1.resnets.0.conv1.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.0.conv1.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.0.conv1.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv1.weight:qs"> : tensor<640x1920x3x3xi8>
util.global private @"__auto.up_blocks.1.resnets.0.conv1.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv1.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.resnets.0.conv1.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv1.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.resnets.0.conv1.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv1.weight:d"> : tensor<640x1x1x1xf32>
// Timestep-embedding projection (1280 -> 640), unquantized f16.
util.global private @__auto.up_blocks.1.resnets.0.time_emb_proj.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.up_blocks.1.resnets.0.time_emb_proj.weight = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @__auto.up_blocks.1.resnets.0.time_emb_proj.bias = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.time_emb_proj.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.resnets.0.norm2.weight = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.norm2.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.resnets.0.norm2.bias = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.norm2.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.resnets.0.conv2.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv2.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.up_blocks.1.resnets.0.conv2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.0.conv2.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.0.conv2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv2.weight:qs"> : tensor<640x640x3x3xi8>
util.global private @"__auto.up_blocks.1.resnets.0.conv2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.resnets.0.conv2.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv2.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.resnets.0.conv2.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv2.weight:d"> : tensor<640x1x1x1xf32>
// 1x1 shortcut conv matching the 1920 -> 640 channel change of the residual path.
util.global private @__auto.up_blocks.1.resnets.0.conv_shortcut.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv_shortcut.premul_input"> : tensor<1x1920x1x1xf16>
util.global private @"__auto.up_blocks.1.resnets.0.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.0.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.0.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv_shortcut.weight:qs"> : tensor<640x1920x1x1xi8>
util.global private @"__auto.up_blocks.1.resnets.0.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv_shortcut.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.resnets.0.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv_shortcut.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.resnets.0.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.0.conv_shortcut.weight:d"> : tensor<640x1x1x1xf32>
// --- up_blocks.1.attentions.0: input norm + proj_in ---
// This stage runs at 640 channels. f16 norm affine params, followed by the
// int8-quantized 640 -> 640 input projection of the transformer stack.
util.global private @__auto.up_blocks.1.attentions.0.norm.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.norm.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.norm.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.norm.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.proj_in.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_in.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.proj_in.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.proj_in.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_in.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.proj_in.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_in.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.proj_in.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_in.bias:d"> : tensor<640xf32>
// --- up_blocks.1.attentions.0.transformer_blocks.0 (continues past this chunk) ---
// 640-dim transformer block: norm1 -> self-attn (fused to_qkv, int8) ->
// norm2 -> cross-attn (to_q 640-dim, fused to_kv over 2048-dim encoder
// states). Suffixes: `:qs` quantized values, `:d` f32 dequant scales,
// `q_input:scale` input quant scale, `premul_input` f16 input premultiplier
// (NOTE(review): naming-derived — confirm against the exporter).
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.0.ff.net.2.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.transformer_blocks.1.ff.net.2.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.0.proj_out.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_out.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.0.proj_out.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.0.proj_out.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_out.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.0.proj_out.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_out.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.0.proj_out.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.0.proj_out.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.resnets.1.norm1.weight = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.norm1.weight"> : tensor<1280xf16>
util.global private @__auto.up_blocks.1.resnets.1.norm1.bias = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.norm1.bias"> : tensor<1280xf16>
util.global private @__auto.up_blocks.1.resnets.1.conv1.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv1.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.up_blocks.1.resnets.1.conv1.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.1.conv1.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.1.conv1.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv1.weight:qs"> : tensor<640x1280x3x3xi8>
util.global private @"__auto.up_blocks.1.resnets.1.conv1.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv1.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.resnets.1.conv1.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv1.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.resnets.1.conv1.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv1.weight:d"> : tensor<640x1x1x1xf32>
util.global private @__auto.up_blocks.1.resnets.1.time_emb_proj.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.up_blocks.1.resnets.1.time_emb_proj.weight = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @__auto.up_blocks.1.resnets.1.time_emb_proj.bias = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.time_emb_proj.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.resnets.1.norm2.weight = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.norm2.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.resnets.1.norm2.bias = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.norm2.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.resnets.1.conv2.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv2.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.up_blocks.1.resnets.1.conv2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.1.conv2.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.1.conv2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv2.weight:qs"> : tensor<640x640x3x3xi8>
util.global private @"__auto.up_blocks.1.resnets.1.conv2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.resnets.1.conv2.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv2.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.resnets.1.conv2.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv2.weight:d"> : tensor<640x1x1x1xf32>
util.global private @__auto.up_blocks.1.resnets.1.conv_shortcut.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv_shortcut.premul_input"> : tensor<1x1280x1x1xf16>
util.global private @"__auto.up_blocks.1.resnets.1.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.1.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.1.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv_shortcut.weight:qs"> : tensor<640x1280x1x1xi8>
util.global private @"__auto.up_blocks.1.resnets.1.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv_shortcut.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.resnets.1.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv_shortcut.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.resnets.1.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.1.conv_shortcut.weight:d"> : tensor<640x1x1x1xf32>
util.global private @__auto.up_blocks.1.attentions.1.norm.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.norm.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.norm.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.norm.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.proj_in.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_in.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.proj_in.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.proj_in.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_in.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.proj_in.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_in.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.proj_in.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_in.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
// --- up_blocks.1.attentions.1 / transformer_blocks.0 (remainder) ---
// Naming convention (per the declared types): ":qs" = quantized storage (i8 weights,
// i32 biases), ":d" = f32 dequant scale, "q_input:scale" = scalar f32 input-quant
// scale, "premul_input" = f16 elementwise pre-multiplier. Semantics of the pairing
// are inferred from names/types — confirm against the exporting quantizer.
// Cross-attention (attn2) fused KV value projection: 2048 -> 640.
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
// Cross-attention output projection: 640 -> 640.
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<640xf32>
// Pre-FF layer norm (norm3): unquantized f16 affine params.
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
// Feed-forward: proj expands 640 -> 5120 (GEGLU-style width, presumably — verify),
// net.2 contracts 2560 -> 640.
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.0.ff.net.2.bias:d"> : tensor<640xf32>
// --- up_blocks.1.attentions.1 / transformer_blocks.1 ---
// Same layout as transformer_blocks.0: norm1 -> self-attn (attn1, fused QKV) ->
// norm2 -> cross-attn (attn2, fused KV from a 2048-wide context) -> norm3 -> FF.
// ":qs"/":d" are quantized storage / f32 dequant scale per the declared types.
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
// Self-attention: fused to_qkv shares one premul input and one input-quant scale;
// q/k/v each carry their own 640x640 i8 weight + i32 bias + f32 bias scale.
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
// Self-attention output projection.
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
// Cross-attention: query from the 640-wide hidden state, fused KV from the
// 2048-wide conditioning input (separate premul/scale for each input stream).
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
// Cross-attention output projection.
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
// Feed-forward: 640 -> 5120 proj, 2560 -> 640 down-projection (net.2).
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.transformer_blocks.1.ff.net.2.bias:d"> : tensor<640xf32>
// --- up_blocks.1.attentions.1 / proj_out ---
// Final 640 -> 640 projection of this attention stage (quantized i8 weight,
// i32 bias with f32 dequant scale; f16 premul + scalar input-quant scale).
util.global private @__auto.up_blocks.1.attentions.1.proj_out.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_out.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.1.proj_out.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.1.proj_out.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_out.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.1.proj_out.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_out.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.1.proj_out.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.1.proj_out.bias:d"> : tensor<640xf32>
// --- up_blocks.1.resnets.2 ---
// ResNet block with 960 input channels and 640 output channels. Convolutions are
// quantized: i8 weights (OIHW per declared shapes), i32 biases, per-output-channel
// f32 weight scales (":d", 640x1x1x1), plus scalar input scale/rscale pairs.
// norm1 over 960 channels (input), then conv1 960 -> 640 with 3x3 kernel.
util.global private @__auto.up_blocks.1.resnets.2.norm1.weight = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.norm1.weight"> : tensor<960xf16>
util.global private @__auto.up_blocks.1.resnets.2.norm1.bias = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.norm1.bias"> : tensor<960xf16>
util.global private @__auto.up_blocks.1.resnets.2.conv1.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv1.premul_input"> : tensor<1x960x1x1xf16>
util.global private @"__auto.up_blocks.1.resnets.2.conv1.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.2.conv1.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.2.conv1.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv1.weight:qs"> : tensor<640x960x3x3xi8>
util.global private @"__auto.up_blocks.1.resnets.2.conv1.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv1.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.resnets.2.conv1.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv1.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.resnets.2.conv1.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv1.weight:d"> : tensor<640x1x1x1xf32>
// Timestep-embedding projection into this block: unquantized f16, 1280 -> 640.
util.global private @__auto.up_blocks.1.resnets.2.time_emb_proj.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.up_blocks.1.resnets.2.time_emb_proj.weight = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.time_emb_proj.weight"> : tensor<640x1280xf16>
util.global private @__auto.up_blocks.1.resnets.2.time_emb_proj.bias = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.time_emb_proj.bias"> : tensor<640xf16>
// norm2 over 640 channels, then conv2 640 -> 640 with 3x3 kernel.
util.global private @__auto.up_blocks.1.resnets.2.norm2.weight = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.norm2.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.resnets.2.norm2.bias = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.norm2.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.resnets.2.conv2.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv2.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.up_blocks.1.resnets.2.conv2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.2.conv2.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.2.conv2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv2.weight:qs"> : tensor<640x640x3x3xi8>
util.global private @"__auto.up_blocks.1.resnets.2.conv2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.resnets.2.conv2.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv2.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.resnets.2.conv2.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv2.weight:d"> : tensor<640x1x1x1xf32>
// Residual shortcut: 1x1 conv matching channels 960 -> 640.
util.global private @__auto.up_blocks.1.resnets.2.conv_shortcut.premul_input = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv_shortcut.premul_input"> : tensor<1x960x1x1xf16>
util.global private @"__auto.up_blocks.1.resnets.2.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.2.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.resnets.2.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv_shortcut.weight:qs"> : tensor<640x960x1x1xi8>
util.global private @"__auto.up_blocks.1.resnets.2.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv_shortcut.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.resnets.2.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv_shortcut.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.resnets.2.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.resnets.2.conv_shortcut.weight:d"> : tensor<640x1x1x1xf32>
util.global private @__auto.up_blocks.1.attentions.2.norm.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.norm.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.norm.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.norm.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.proj_in.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_in.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.proj_in.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_in.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.proj_in.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_in.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.proj_in.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_in.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.proj_in.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_in.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.norm1.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.norm1.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.norm1.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.norm1.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.norm2.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.norm2.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.norm2.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.norm2.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.attn2.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.norm3.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.norm3.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.norm3.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.norm3.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.0.ff.net.2.bias:d"> : tensor<640xf32>
// Parameters for up_blocks.1.attentions.2.transformer_blocks.1
// (640-channel transformer block: norm1 -> self-attn (attn1) ->
//  norm2 -> cross-attn (attn2, 2048-dim context) -> norm3 -> feed-forward).
// LayerNorm affine parameters are kept in f16; linear weights are i8-quantized
// with f32 dequant scales (":d") — naming convention inferred, TODO confirm.
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.norm1.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.norm1.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.norm1.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.norm1.bias"> : tensor<640xf16>
// attn1: self-attention; Q/K/V share a fused "to_qkv" input scale but keep
// separate 640x640 weight/bias tensors.
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_qkv.to_v.bias:d"> : tensor<640xf32>
// attn1 output projection.
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn1.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.norm2.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.norm2.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.norm2.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.norm2.bias"> : tensor<640xf16>
// attn2: cross-attention; Q projects the 640-dim hidden state, fused K/V
// project the 2048-dim context (matches the 2x64x2048 encoder input of @main).
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.q_input:scale"> : tensor<f32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.premul_input"> : tensor<1x1x2048xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_q.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_k.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.weight:qs"> : tensor<640x2048xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_kv.to_v.bias:d"> : tensor<640xf32>
// attn2 output projection.
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.attn2.to_out.0.bias:d"> : tensor<640xf32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.norm3.weight = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.norm3.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.norm3.bias = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.norm3.bias"> : tensor<640xf16>
// Feed-forward: 640 -> 5120 GEGLU up-projection, then 2560 -> 640 back down.
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.weight:qs"> : tensor<5120x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.bias:qs"> : tensor<5120xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.0.proj.bias:d"> : tensor<5120xf32>
util.global private @__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.premul_input"> : tensor<1x1x2560xf16>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.weight:qs"> : tensor<640x2560xi8>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.transformer_blocks.1.ff.net.2.bias:d"> : tensor<640xf32>
// Output projection of up_blocks.1.attentions.2 (640 -> 640, i8-quantized
// weight with f32 dequant scales per the file's ":qs"/":d" convention).
util.global private @__auto.up_blocks.1.attentions.2.proj_out.premul_input = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_out.premul_input"> : tensor<1x1x640xf16>
util.global private @"__auto.up_blocks.1.attentions.2.proj_out.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_out.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.attentions.2.proj_out.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_out.weight:qs"> : tensor<640x640xi8>
util.global private @"__auto.up_blocks.1.attentions.2.proj_out.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_out.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.attentions.2.proj_out.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.attentions.2.proj_out.bias:d"> : tensor<640xf32>
// up_blocks.1 upsampler conv (640 -> 640, 3x3). Conv layers additionally
// carry a "q_input:rscale" scalar alongside "q_input:scale" (presumably the
// reciprocal / rescale factor — TODO confirm against the quantizer), and a
// per-output-channel weight dequant scale of shape 640x1x1x1.
util.global private @__auto.up_blocks.1.upsamplers.0.conv.premul_input = #stream.parameter.named<"model"::"up_blocks.1.upsamplers.0.conv.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.up_blocks.1.upsamplers.0.conv.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.1.upsamplers.0.conv.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.upsamplers.0.conv.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.1.upsamplers.0.conv.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.1.upsamplers.0.conv.weight:qs" = #stream.parameter.named<"model"::"up_blocks.1.upsamplers.0.conv.weight:qs"> : tensor<640x640x3x3xi8>
util.global private @"__auto.up_blocks.1.upsamplers.0.conv.bias:qs" = #stream.parameter.named<"model"::"up_blocks.1.upsamplers.0.conv.bias:qs"> : tensor<640xi32>
util.global private @"__auto.up_blocks.1.upsamplers.0.conv.bias:d" = #stream.parameter.named<"model"::"up_blocks.1.upsamplers.0.conv.bias:d"> : tensor<640xf32>
util.global private @"__auto.up_blocks.1.upsamplers.0.conv.weight:d" = #stream.parameter.named<"model"::"up_blocks.1.upsamplers.0.conv.weight:d"> : tensor<640x1x1x1xf32>
// up_blocks.2.resnets.0: ResNet block taking a 960-channel input
// (640 upsampled + 320 skip — presumably; verify channel math in @main)
// down to 320 channels. Structure: norm1 -> conv1 (960->320) ->
// time_emb_proj (1280->320, f16, unquantized) -> norm2 -> conv2 (320->320),
// plus a 1x1 conv_shortcut (960->320) on the residual path.
util.global private @__auto.up_blocks.2.resnets.0.norm1.weight = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.norm1.weight"> : tensor<960xf16>
util.global private @__auto.up_blocks.2.resnets.0.norm1.bias = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.norm1.bias"> : tensor<960xf16>
util.global private @__auto.up_blocks.2.resnets.0.conv1.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv1.premul_input"> : tensor<1x960x1x1xf16>
util.global private @"__auto.up_blocks.2.resnets.0.conv1.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.0.conv1.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.0.conv1.weight:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv1.weight:qs"> : tensor<320x960x3x3xi8>
util.global private @"__auto.up_blocks.2.resnets.0.conv1.bias:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv1.bias:qs"> : tensor<320xi32>
util.global private @"__auto.up_blocks.2.resnets.0.conv1.bias:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv1.bias:d"> : tensor<320xf32>
util.global private @"__auto.up_blocks.2.resnets.0.conv1.weight:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv1.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.up_blocks.2.resnets.0.time_emb_proj.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.up_blocks.2.resnets.0.time_emb_proj.weight = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @__auto.up_blocks.2.resnets.0.time_emb_proj.bias = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.time_emb_proj.bias"> : tensor<320xf16>
util.global private @__auto.up_blocks.2.resnets.0.norm2.weight = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.norm2.weight"> : tensor<320xf16>
util.global private @__auto.up_blocks.2.resnets.0.norm2.bias = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.norm2.bias"> : tensor<320xf16>
util.global private @__auto.up_blocks.2.resnets.0.conv2.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv2.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.up_blocks.2.resnets.0.conv2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.0.conv2.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.0.conv2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv2.weight:qs"> : tensor<320x320x3x3xi8>
util.global private @"__auto.up_blocks.2.resnets.0.conv2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv2.bias:qs"> : tensor<320xi32>
util.global private @"__auto.up_blocks.2.resnets.0.conv2.bias:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv2.bias:d"> : tensor<320xf32>
util.global private @"__auto.up_blocks.2.resnets.0.conv2.weight:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv2.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.up_blocks.2.resnets.0.conv_shortcut.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv_shortcut.premul_input"> : tensor<1x960x1x1xf16>
util.global private @"__auto.up_blocks.2.resnets.0.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.0.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.0.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv_shortcut.weight:qs"> : tensor<320x960x1x1xi8>
util.global private @"__auto.up_blocks.2.resnets.0.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv_shortcut.bias:qs"> : tensor<320xi32>
util.global private @"__auto.up_blocks.2.resnets.0.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv_shortcut.bias:d"> : tensor<320xf32>
util.global private @"__auto.up_blocks.2.resnets.0.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.0.conv_shortcut.weight:d"> : tensor<320x1x1x1xf32>
// up_blocks.2.resnets.1: same layout as resnets.0 but with a 640-channel
// input (320 + 320 skip — presumably; verify in @main), reduced to 320.
util.global private @__auto.up_blocks.2.resnets.1.norm1.weight = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.norm1.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.2.resnets.1.norm1.bias = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.norm1.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.2.resnets.1.conv1.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv1.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.up_blocks.2.resnets.1.conv1.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.1.conv1.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.1.conv1.weight:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv1.weight:qs"> : tensor<320x640x3x3xi8>
util.global private @"__auto.up_blocks.2.resnets.1.conv1.bias:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv1.bias:qs"> : tensor<320xi32>
util.global private @"__auto.up_blocks.2.resnets.1.conv1.bias:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv1.bias:d"> : tensor<320xf32>
util.global private @"__auto.up_blocks.2.resnets.1.conv1.weight:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv1.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.up_blocks.2.resnets.1.time_emb_proj.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.up_blocks.2.resnets.1.time_emb_proj.weight = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @__auto.up_blocks.2.resnets.1.time_emb_proj.bias = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.time_emb_proj.bias"> : tensor<320xf16>
util.global private @__auto.up_blocks.2.resnets.1.norm2.weight = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.norm2.weight"> : tensor<320xf16>
util.global private @__auto.up_blocks.2.resnets.1.norm2.bias = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.norm2.bias"> : tensor<320xf16>
util.global private @__auto.up_blocks.2.resnets.1.conv2.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv2.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.up_blocks.2.resnets.1.conv2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.1.conv2.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.1.conv2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv2.weight:qs"> : tensor<320x320x3x3xi8>
util.global private @"__auto.up_blocks.2.resnets.1.conv2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv2.bias:qs"> : tensor<320xi32>
util.global private @"__auto.up_blocks.2.resnets.1.conv2.bias:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv2.bias:d"> : tensor<320xf32>
util.global private @"__auto.up_blocks.2.resnets.1.conv2.weight:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv2.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.up_blocks.2.resnets.1.conv_shortcut.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv_shortcut.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.up_blocks.2.resnets.1.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.1.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.1.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv_shortcut.weight:qs"> : tensor<320x640x1x1xi8>
util.global private @"__auto.up_blocks.2.resnets.1.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv_shortcut.bias:qs"> : tensor<320xi32>
util.global private @"__auto.up_blocks.2.resnets.1.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv_shortcut.bias:d"> : tensor<320xf32>
util.global private @"__auto.up_blocks.2.resnets.1.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.1.conv_shortcut.weight:d"> : tensor<320x1x1x1xf32>
// up_blocks.2.resnets.2: identical structure and shapes to resnets.1
// (640-in / 320-out ResNet block with 1x1 quantized conv_shortcut).
util.global private @__auto.up_blocks.2.resnets.2.norm1.weight = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.norm1.weight"> : tensor<640xf16>
util.global private @__auto.up_blocks.2.resnets.2.norm1.bias = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.norm1.bias"> : tensor<640xf16>
util.global private @__auto.up_blocks.2.resnets.2.conv1.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv1.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.up_blocks.2.resnets.2.conv1.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv1.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.2.conv1.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv1.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.2.conv1.weight:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv1.weight:qs"> : tensor<320x640x3x3xi8>
util.global private @"__auto.up_blocks.2.resnets.2.conv1.bias:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv1.bias:qs"> : tensor<320xi32>
util.global private @"__auto.up_blocks.2.resnets.2.conv1.bias:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv1.bias:d"> : tensor<320xf32>
util.global private @"__auto.up_blocks.2.resnets.2.conv1.weight:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv1.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.up_blocks.2.resnets.2.time_emb_proj.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.time_emb_proj.premul_input"> : tensor<1x1280xf16>
util.global private @__auto.up_blocks.2.resnets.2.time_emb_proj.weight = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.time_emb_proj.weight"> : tensor<320x1280xf16>
util.global private @__auto.up_blocks.2.resnets.2.time_emb_proj.bias = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.time_emb_proj.bias"> : tensor<320xf16>
util.global private @__auto.up_blocks.2.resnets.2.norm2.weight = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.norm2.weight"> : tensor<320xf16>
util.global private @__auto.up_blocks.2.resnets.2.norm2.bias = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.norm2.bias"> : tensor<320xf16>
util.global private @__auto.up_blocks.2.resnets.2.conv2.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv2.premul_input"> : tensor<1x320x1x1xf16>
util.global private @"__auto.up_blocks.2.resnets.2.conv2.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv2.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.2.conv2.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv2.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.2.conv2.weight:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv2.weight:qs"> : tensor<320x320x3x3xi8>
util.global private @"__auto.up_blocks.2.resnets.2.conv2.bias:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv2.bias:qs"> : tensor<320xi32>
util.global private @"__auto.up_blocks.2.resnets.2.conv2.bias:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv2.bias:d"> : tensor<320xf32>
util.global private @"__auto.up_blocks.2.resnets.2.conv2.weight:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv2.weight:d"> : tensor<320x1x1x1xf32>
util.global private @__auto.up_blocks.2.resnets.2.conv_shortcut.premul_input = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv_shortcut.premul_input"> : tensor<1x640x1x1xf16>
util.global private @"__auto.up_blocks.2.resnets.2.conv_shortcut.q_input:scale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv_shortcut.q_input:scale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.2.conv_shortcut.q_input:rscale" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv_shortcut.q_input:rscale"> : tensor<f32>
util.global private @"__auto.up_blocks.2.resnets.2.conv_shortcut.weight:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv_shortcut.weight:qs"> : tensor<320x640x1x1xi8>
util.global private @"__auto.up_blocks.2.resnets.2.conv_shortcut.bias:qs" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv_shortcut.bias:qs"> : tensor<320xi32>
util.global private @"__auto.up_blocks.2.resnets.2.conv_shortcut.bias:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv_shortcut.bias:d"> : tensor<320xf32>
util.global private @"__auto.up_blocks.2.resnets.2.conv_shortcut.weight:d" = #stream.parameter.named<"model"::"up_blocks.2.resnets.2.conv_shortcut.weight:d"> : tensor<320x1x1x1xf32>
// Final output head: conv_norm_out affine params (f16) and the unquantized
// f16 conv_out producing the 4-channel latent (matches the 1x4x128x128
// f16 result type of @main).
util.global private @__auto.conv_norm_out.weight = #stream.parameter.named<"model"::"conv_norm_out.weight"> : tensor<320xf16>
util.global private @__auto.conv_norm_out.bias = #stream.parameter.named<"model"::"conv_norm_out.bias"> : tensor<320xf16>
util.global private @__auto.conv_out.premul_input = #stream.parameter.named<"model"::"conv_out.premul_input"> : tensor<1x320x1x1xf16>
util.global private @__auto.conv_out.weight = #stream.parameter.named<"model"::"conv_out.weight"> : tensor<4x320x3x3xf16>
util.global private @__auto.conv_out.bias = #stream.parameter.named<"model"::"conv_out.bias"> : tensor<4xf16>
func.func @main(%arg0: !torch.vtensor<[1,4,128,128],f16>, %arg1: !torch.vtensor<[1],si32>, %arg2: !torch.vtensor<[2,64,2048],f16>, %arg3: !torch.vtensor<[2,1280],f16>, %arg4: !torch.vtensor<[2,6],f16>, %arg5: !torch.vtensor<[1],f16>) -> !torch.vtensor<[1,4,128,128],f16> attributes {torch.assume_strict_symbolic_shapes} {
%__auto.time_embedding.linear_1.premul_input = util.global.load @__auto.time_embedding.linear_1.premul_input : tensor<1x320xf16>
%0 = torch_c.from_builtin_tensor %__auto.time_embedding.linear_1.premul_input : tensor<1x320xf16> -> !torch.vtensor<[1,320],f16>
%__auto.time_embedding.linear_1.weight = util.global.load @__auto.time_embedding.linear_1.weight : tensor<1280x320xf16>
%1 = torch_c.from_builtin_tensor %__auto.time_embedding.linear_1.weight : tensor<1280x320xf16> -> !torch.vtensor<[1280,320],f16>
%__auto.time_embedding.linear_1.bias = util.global.load @__auto.time_embedding.linear_1.bias : tensor<1280xf16>
%2 = torch_c.from_builtin_tensor %__auto.time_embedding.linear_1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%__auto.time_embedding.linear_2.premul_input = util.global.load @__auto.time_embedding.linear_2.premul_input : tensor<1x1280xf16>
%3 = torch_c.from_builtin_tensor %__auto.time_embedding.linear_2.premul_input : tensor<1x1280xf16> -> !torch.vtensor<[1,1280],f16>
%__auto.time_embedding.linear_2.weight = util.global.load @__auto.time_embedding.linear_2.weight : tensor<1280x1280xf16>
%4 = torch_c.from_builtin_tensor %__auto.time_embedding.linear_2.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%__auto.time_embedding.linear_2.bias = util.global.load @__auto.time_embedding.linear_2.bias : tensor<1280xf16>
%5 = torch_c.from_builtin_tensor %__auto.time_embedding.linear_2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%__auto.add_embedding.linear_1.premul_input = util.global.load @__auto.add_embedding.linear_1.premul_input : tensor<1x2816xf16>
%6 = torch_c.from_builtin_tensor %__auto.add_embedding.linear_1.premul_input : tensor<1x2816xf16> -> !torch.vtensor<[1,2816],f16>
%__auto.add_embedding.linear_1.weight = util.global.load @__auto.add_embedding.linear_1.weight : tensor<1280x2816xf16>
%7 = torch_c.from_builtin_tensor %__auto.add_embedding.linear_1.weight : tensor<1280x2816xf16> -> !torch.vtensor<[1280,2816],f16>
%__auto.add_embedding.linear_1.bias = util.global.load @__auto.add_embedding.linear_1.bias : tensor<1280xf16>
%8 = torch_c.from_builtin_tensor %__auto.add_embedding.linear_1.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%__auto.add_embedding.linear_2.premul_input = util.global.load @__auto.add_embedding.linear_2.premul_input : tensor<1x1280xf16>
%9 = torch_c.from_builtin_tensor %__auto.add_embedding.linear_2.premul_input : tensor<1x1280xf16> -> !torch.vtensor<[1,1280],f16>
%__auto.add_embedding.linear_2.weight = util.global.load @__auto.add_embedding.linear_2.weight : tensor<1280x1280xf16>
%10 = torch_c.from_builtin_tensor %__auto.add_embedding.linear_2.weight : tensor<1280x1280xf16> -> !torch.vtensor<[1280,1280],f16>
%__auto.add_embedding.linear_2.bias = util.global.load @__auto.add_embedding.linear_2.bias : tensor<1280xf16>
%11 = torch_c.from_builtin_tensor %__auto.add_embedding.linear_2.bias : tensor<1280xf16> -> !torch.vtensor<[1280],f16>
%__auto.conv_in.premul_input = util.global.load @__auto.conv_in.premul_input : tensor<1x4x1x1xf16>
%12 = torch_c.from_builtin_tensor %__auto.conv_in.premul_input : tensor<1x4x1x1xf16> -> !torch.vtensor<[1,4,1,1],f16>
%__auto.conv_in.weight = util.global.load @__auto.conv_in.weight : tensor<320x4x3x3xf16>
%13 = torch_c.from_builtin_tensor %__auto.conv_in.weight : tensor<320x4x3x3xf16> -> !torch.vtensor<[320,4,3,3],f16>
%__auto.conv_in.bias = util.global.load @__auto.conv_in.bias : tensor<320xf16>
%14 = torch_c.from_builtin_tensor %__auto.conv_in.bias : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.0.norm1.weight = util.global.load @__auto.down_blocks.0.resnets.0.norm1.weight : tensor<320xf16>
%15 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.norm1.weight : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.0.norm1.bias = util.global.load @__auto.down_blocks.0.resnets.0.norm1.bias : tensor<320xf16>
%16 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.norm1.bias : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.0.conv1.premul_input = util.global.load @__auto.down_blocks.0.resnets.0.conv1.premul_input : tensor<1x320x1x1xf16>
%17 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv1.premul_input : tensor<1x320x1x1xf16> -> !torch.vtensor<[1,320,1,1],f16>
%__auto.down_blocks.0.resnets.0.conv1.q_input3Ascale = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.q_input:scale" : tensor<f32>
%18 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv1.q_input3Ascale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.resnets.0.conv1.q_input3Arscale = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.q_input:rscale" : tensor<f32>
%19 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv1.q_input3Arscale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.resnets.0.conv1.weight3Aqs = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.weight:qs" : tensor<320x320x3x3xi8>
%20 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv1.weight3Aqs : tensor<320x320x3x3xi8> -> !torch.vtensor<[320,320,3,3],si8>
%__auto.down_blocks.0.resnets.0.conv1.bias3Aqs = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.bias:qs" : tensor<320xi32>
%21 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv1.bias3Aqs : tensor<320xi32> -> !torch.vtensor<[320],si32>
%__auto.down_blocks.0.resnets.0.conv1.bias3Ad = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.bias:d" : tensor<320xf32>
%22 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv1.bias3Ad : tensor<320xf32> -> !torch.vtensor<[320],f32>
%__auto.down_blocks.0.resnets.0.conv1.weight3Ad = util.global.load @"__auto.down_blocks.0.resnets.0.conv1.weight:d" : tensor<320x1x1x1xf32>
%23 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv1.weight3Ad : tensor<320x1x1x1xf32> -> !torch.vtensor<[320,1,1,1],f32>
%__auto.down_blocks.0.resnets.0.time_emb_proj.premul_input = util.global.load @__auto.down_blocks.0.resnets.0.time_emb_proj.premul_input : tensor<1x1280xf16>
%24 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.time_emb_proj.premul_input : tensor<1x1280xf16> -> !torch.vtensor<[1,1280],f16>
%__auto.down_blocks.0.resnets.0.time_emb_proj.weight = util.global.load @__auto.down_blocks.0.resnets.0.time_emb_proj.weight : tensor<320x1280xf16>
%25 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.time_emb_proj.weight : tensor<320x1280xf16> -> !torch.vtensor<[320,1280],f16>
%__auto.down_blocks.0.resnets.0.time_emb_proj.bias = util.global.load @__auto.down_blocks.0.resnets.0.time_emb_proj.bias : tensor<320xf16>
%26 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.time_emb_proj.bias : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.0.norm2.weight = util.global.load @__auto.down_blocks.0.resnets.0.norm2.weight : tensor<320xf16>
%27 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.norm2.weight : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.0.norm2.bias = util.global.load @__auto.down_blocks.0.resnets.0.norm2.bias : tensor<320xf16>
%28 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.norm2.bias : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.0.conv2.premul_input = util.global.load @__auto.down_blocks.0.resnets.0.conv2.premul_input : tensor<1x320x1x1xf16>
%29 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv2.premul_input : tensor<1x320x1x1xf16> -> !torch.vtensor<[1,320,1,1],f16>
%__auto.down_blocks.0.resnets.0.conv2.q_input3Ascale = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.q_input:scale" : tensor<f32>
%30 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv2.q_input3Ascale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.resnets.0.conv2.q_input3Arscale = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.q_input:rscale" : tensor<f32>
%31 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv2.q_input3Arscale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.resnets.0.conv2.weight3Aqs = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.weight:qs" : tensor<320x320x3x3xi8>
%32 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv2.weight3Aqs : tensor<320x320x3x3xi8> -> !torch.vtensor<[320,320,3,3],si8>
%__auto.down_blocks.0.resnets.0.conv2.bias3Aqs = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.bias:qs" : tensor<320xi32>
%33 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv2.bias3Aqs : tensor<320xi32> -> !torch.vtensor<[320],si32>
%__auto.down_blocks.0.resnets.0.conv2.bias3Ad = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.bias:d" : tensor<320xf32>
%34 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv2.bias3Ad : tensor<320xf32> -> !torch.vtensor<[320],f32>
%__auto.down_blocks.0.resnets.0.conv2.weight3Ad = util.global.load @"__auto.down_blocks.0.resnets.0.conv2.weight:d" : tensor<320x1x1x1xf32>
%35 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.0.conv2.weight3Ad : tensor<320x1x1x1xf32> -> !torch.vtensor<[320,1,1,1],f32>
%__auto.down_blocks.0.resnets.1.norm1.weight = util.global.load @__auto.down_blocks.0.resnets.1.norm1.weight : tensor<320xf16>
%36 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.norm1.weight : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.1.norm1.bias = util.global.load @__auto.down_blocks.0.resnets.1.norm1.bias : tensor<320xf16>
%37 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.norm1.bias : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.1.conv1.premul_input = util.global.load @__auto.down_blocks.0.resnets.1.conv1.premul_input : tensor<1x320x1x1xf16>
%38 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv1.premul_input : tensor<1x320x1x1xf16> -> !torch.vtensor<[1,320,1,1],f16>
%__auto.down_blocks.0.resnets.1.conv1.q_input3Ascale = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.q_input:scale" : tensor<f32>
%39 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv1.q_input3Ascale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.resnets.1.conv1.q_input3Arscale = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.q_input:rscale" : tensor<f32>
%40 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv1.q_input3Arscale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.resnets.1.conv1.weight3Aqs = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.weight:qs" : tensor<320x320x3x3xi8>
%41 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv1.weight3Aqs : tensor<320x320x3x3xi8> -> !torch.vtensor<[320,320,3,3],si8>
%__auto.down_blocks.0.resnets.1.conv1.bias3Aqs = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.bias:qs" : tensor<320xi32>
%42 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv1.bias3Aqs : tensor<320xi32> -> !torch.vtensor<[320],si32>
%__auto.down_blocks.0.resnets.1.conv1.bias3Ad = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.bias:d" : tensor<320xf32>
%43 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv1.bias3Ad : tensor<320xf32> -> !torch.vtensor<[320],f32>
%__auto.down_blocks.0.resnets.1.conv1.weight3Ad = util.global.load @"__auto.down_blocks.0.resnets.1.conv1.weight:d" : tensor<320x1x1x1xf32>
%44 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv1.weight3Ad : tensor<320x1x1x1xf32> -> !torch.vtensor<[320,1,1,1],f32>
%__auto.down_blocks.0.resnets.1.time_emb_proj.premul_input = util.global.load @__auto.down_blocks.0.resnets.1.time_emb_proj.premul_input : tensor<1x1280xf16>
%45 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.time_emb_proj.premul_input : tensor<1x1280xf16> -> !torch.vtensor<[1,1280],f16>
%__auto.down_blocks.0.resnets.1.time_emb_proj.weight = util.global.load @__auto.down_blocks.0.resnets.1.time_emb_proj.weight : tensor<320x1280xf16>
%46 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.time_emb_proj.weight : tensor<320x1280xf16> -> !torch.vtensor<[320,1280],f16>
%__auto.down_blocks.0.resnets.1.time_emb_proj.bias = util.global.load @__auto.down_blocks.0.resnets.1.time_emb_proj.bias : tensor<320xf16>
%47 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.time_emb_proj.bias : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.1.norm2.weight = util.global.load @__auto.down_blocks.0.resnets.1.norm2.weight : tensor<320xf16>
%48 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.norm2.weight : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.1.norm2.bias = util.global.load @__auto.down_blocks.0.resnets.1.norm2.bias : tensor<320xf16>
%49 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.norm2.bias : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.0.resnets.1.conv2.premul_input = util.global.load @__auto.down_blocks.0.resnets.1.conv2.premul_input : tensor<1x320x1x1xf16>
%50 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv2.premul_input : tensor<1x320x1x1xf16> -> !torch.vtensor<[1,320,1,1],f16>
%__auto.down_blocks.0.resnets.1.conv2.q_input3Ascale = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.q_input:scale" : tensor<f32>
%51 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv2.q_input3Ascale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.resnets.1.conv2.q_input3Arscale = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.q_input:rscale" : tensor<f32>
%52 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv2.q_input3Arscale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.resnets.1.conv2.weight3Aqs = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.weight:qs" : tensor<320x320x3x3xi8>
%53 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv2.weight3Aqs : tensor<320x320x3x3xi8> -> !torch.vtensor<[320,320,3,3],si8>
%__auto.down_blocks.0.resnets.1.conv2.bias3Aqs = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.bias:qs" : tensor<320xi32>
%54 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv2.bias3Aqs : tensor<320xi32> -> !torch.vtensor<[320],si32>
%__auto.down_blocks.0.resnets.1.conv2.bias3Ad = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.bias:d" : tensor<320xf32>
%55 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv2.bias3Ad : tensor<320xf32> -> !torch.vtensor<[320],f32>
%__auto.down_blocks.0.resnets.1.conv2.weight3Ad = util.global.load @"__auto.down_blocks.0.resnets.1.conv2.weight:d" : tensor<320x1x1x1xf32>
%56 = torch_c.from_builtin_tensor %__auto.down_blocks.0.resnets.1.conv2.weight3Ad : tensor<320x1x1x1xf32> -> !torch.vtensor<[320,1,1,1],f32>
%__auto.down_blocks.0.downsamplers.0.conv.premul_input = util.global.load @__auto.down_blocks.0.downsamplers.0.conv.premul_input : tensor<1x320x1x1xf16>
%57 = torch_c.from_builtin_tensor %__auto.down_blocks.0.downsamplers.0.conv.premul_input : tensor<1x320x1x1xf16> -> !torch.vtensor<[1,320,1,1],f16>
%__auto.down_blocks.0.downsamplers.0.conv.q_input3Ascale = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.q_input:scale" : tensor<f32>
%58 = torch_c.from_builtin_tensor %__auto.down_blocks.0.downsamplers.0.conv.q_input3Ascale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.downsamplers.0.conv.q_input3Arscale = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.q_input:rscale" : tensor<f32>
%59 = torch_c.from_builtin_tensor %__auto.down_blocks.0.downsamplers.0.conv.q_input3Arscale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.0.downsamplers.0.conv.weight3Aqs = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.weight:qs" : tensor<320x320x3x3xi8>
%60 = torch_c.from_builtin_tensor %__auto.down_blocks.0.downsamplers.0.conv.weight3Aqs : tensor<320x320x3x3xi8> -> !torch.vtensor<[320,320,3,3],si8>
%__auto.down_blocks.0.downsamplers.0.conv.bias3Aqs = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.bias:qs" : tensor<320xi32>
%61 = torch_c.from_builtin_tensor %__auto.down_blocks.0.downsamplers.0.conv.bias3Aqs : tensor<320xi32> -> !torch.vtensor<[320],si32>
%__auto.down_blocks.0.downsamplers.0.conv.bias3Ad = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.bias:d" : tensor<320xf32>
%62 = torch_c.from_builtin_tensor %__auto.down_blocks.0.downsamplers.0.conv.bias3Ad : tensor<320xf32> -> !torch.vtensor<[320],f32>
%__auto.down_blocks.0.downsamplers.0.conv.weight3Ad = util.global.load @"__auto.down_blocks.0.downsamplers.0.conv.weight:d" : tensor<320x1x1x1xf32>
%63 = torch_c.from_builtin_tensor %__auto.down_blocks.0.downsamplers.0.conv.weight3Ad : tensor<320x1x1x1xf32> -> !torch.vtensor<[320,1,1,1],f32>
%__auto.down_blocks.1.resnets.0.norm1.weight = util.global.load @__auto.down_blocks.1.resnets.0.norm1.weight : tensor<320xf16>
%64 = torch_c.from_builtin_tensor %__auto.down_blocks.1.resnets.0.norm1.weight : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.1.resnets.0.norm1.bias = util.global.load @__auto.down_blocks.1.resnets.0.norm1.bias : tensor<320xf16>
%65 = torch_c.from_builtin_tensor %__auto.down_blocks.1.resnets.0.norm1.bias : tensor<320xf16> -> !torch.vtensor<[320],f16>
%__auto.down_blocks.1.resnets.0.conv1.premul_input = util.global.load @__auto.down_blocks.1.resnets.0.conv1.premul_input : tensor<1x320x1x1xf16>
%66 = torch_c.from_builtin_tensor %__auto.down_blocks.1.resnets.0.conv1.premul_input : tensor<1x320x1x1xf16> -> !torch.vtensor<[1,320,1,1],f16>
%__auto.down_blocks.1.resnets.0.conv1.q_input3Ascale = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.q_input:scale" : tensor<f32>
%67 = torch_c.from_builtin_tensor %__auto.down_blocks.1.resnets.0.conv1.q_input3Ascale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.1.resnets.0.conv1.q_input3Arscale = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.q_input:rscale" : tensor<f32>
%68 = torch_c.from_builtin_tensor %__auto.down_blocks.1.resnets.0.conv1.q_input3Arscale : tensor<f32> -> !torch.vtensor<[],f32>
%__auto.down_blocks.1.resnets.0.conv1.weight3Aqs = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.weight:qs" : tensor<640x320x3x3xi8>
%69 = torch_c.from_builtin_tensor %__auto.down_blocks.1.resnets.0.conv1.weight3Aqs : tensor<640x320x3x3xi8> -> !torch.vtensor<[640,320,3,3],si8>
%__auto.down_blocks.1.resnets.0.conv1.bias3Aqs = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.bias:qs" : tensor<640xi32>
%70 = torch_c.from_builtin_tensor %__auto.down_blocks.1.resnets.0.conv1.bias3Aqs : tensor<640xi32> -> !torch.vtensor<[640],si32>
%__auto.down_blocks.1.resnets.0.conv1.bias3Ad = util.global.load @"__auto.down_blocks.1.resnets.0.conv1.bias:d" : tensor<640xf32>
%71 = torch_c.from_builtin_tensor %__auto.down_blocks.1.resnets.0.conv1.bias3Ad : tensor<640xf32> -> !torch.vtensor<[640],f32>
%__
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment